rem      string, lengths 0 – 322k
add      string, lengths 0 – 2.05M
context  string, lengths 8 – 228k
'{outbound} method %s()", not found' % (action)
'{outbound} method %s()", not found' % action
def outbound(cls, action): """ Find the handler B{outbound} method for the specified I{action}. @param action: The I{action} part of an event subject. @type action: str @return: The handler instance method. @rtype: instancemethod """ mutex.acquire() try: method = cls.outbounds.get(action) if method is None: raise Exception,\ '{outbound} method %s()", not found' % (action) else: return method finally: mutex.release()
raise threading.ThreadError('Thread is not active')
raise _ThreadInterruptionError('Thread is not active')
def _tid(thread): """ Determine a thread's id. """ if not thread.is_alive(): raise threading.ThreadError('Thread is not active') if hasattr(thread, '_thread_id'): return thread._thread_id for tid, tobj in threading._active.items(): if tobj is thread: thread._thread_id = tid return tid raise AssertionError('Could not determine thread id')
raise AssertionError('Could not determine thread id')
raise _ThreadInterruptionError('Could not determine thread id')
def _tid(thread): """ Determine a thread's id. """ if not thread.is_alive(): raise threading.ThreadError('Thread is not active') if hasattr(thread, '_thread_id'): return thread._thread_id for tid, tobj in threading._active.items(): if tobj is thread: thread._thread_id = tid return tid raise AssertionError('Could not determine thread id')
raise ValueError('Invalid thread id')
raise _ThreadInterruptionError('Invalid thread id')
def _raise_exception_in_thread(tid, exc_type): """ Raises an exception in the threads with id tid. """ assert inspect.isclass(exc_type) # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) long_tid = ctypes.c_long(tid) exc_ptr = ctypes.py_object(exc_type) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, exc_ptr) if num == 1: return if num == 0: raise ValueError('Invalid thread id') # NOTE if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect null_ptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, null_ptr) raise SystemError('PyThreadState_SetAsyncExc failed')
raise SystemError('PyThreadState_SetAsyncExc failed')
raise _ThreadInterruptionError('PyThreadState_SetAsyncExc failed')
def _raise_exception_in_thread(tid, exc_type): """ Raises an exception in the threads with id tid. """ assert inspect.isclass(exc_type) # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) long_tid = ctypes.c_long(tid) exc_ptr = ctypes.py_object(exc_type) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, exc_ptr) if num == 1: return if num == 0: raise ValueError('Invalid thread id') # NOTE if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect null_ptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, null_ptr) raise SystemError('PyThreadState_SetAsyncExc failed')
_raise_exception_in_thread(_tid(self), exc_type)
_raise_exception_in_thread(_tid(thread), exc_type)
def raise_exception(self, exc_type): """ Raise and exception in this thread. NOTE this is executed in the context of the calling thread and blocks until the exception has been delivered to this thread and this thread exists. """ # first, kill off all the descendants for thread in get_descendants(self): while thread.is_alive(): try: _raise_exception_in_thread(_tid(self), exc_type) time.sleep(self.__default_timeout) except (threading.ThreadError, AssertionError, ValueError, SystemError), e: _log.error('Failed to deliver exception %s to thread[%s]: %s' % (exc_type.__name__, str(self.ident), e.message)) break remove_subtree(self) # then kill and wait for the task thread while not self.__exception_event.is_set(): try: _raise_exception_in_thread(_tid(self), exc_type) self.__exception_event.wait(self.__default_timeout) except (threading.ThreadError, AssertionError, ValueError, SystemError), e: _log.error('Failed to deliver exception %s to thread[%s]: %s' % (exc_type.__name__, str(self.ident), e.message)) break
except (threading.ThreadError, AssertionError, ValueError, SystemError), e:
except _ThreadInterruptionError, e:
def raise_exception(self, exc_type): """ Raise and exception in this thread. NOTE this is executed in the context of the calling thread and blocks until the exception has been delivered to this thread and this thread exists. """ # first, kill off all the descendants for thread in get_descendants(self): while thread.is_alive(): try: _raise_exception_in_thread(_tid(self), exc_type) time.sleep(self.__default_timeout) except (threading.ThreadError, AssertionError, ValueError, SystemError), e: _log.error('Failed to deliver exception %s to thread[%s]: %s' % (exc_type.__name__, str(self.ident), e.message)) break remove_subtree(self) # then kill and wait for the task thread while not self.__exception_event.is_set(): try: _raise_exception_in_thread(_tid(self), exc_type) self.__exception_event.wait(self.__default_timeout) except (threading.ThreadError, AssertionError, ValueError, SystemError), e: _log.error('Failed to deliver exception %s to thread[%s]: %s' % (exc_type.__name__, str(self.ident), e.message)) break
repo = self.pconn.repository(id)
repo = self.get_repo(id)
def run(self): id = self.get_required_option('id') repo = self.pconn.repository(id) syncs = self.pconn.sync_list(id) print_header(_('Status for %s') % id) print _('Repository: %s') % repo['id'] print _('Number of Packages: %d') % repo['package_count'] last_sync = repo['last_sync'] if last_sync is None: last_sync = 'never' else: last_sync = str(parse_date(last_sync)) print _('Last Sync: %s') % last_sync if not syncs or syncs[0]['state'] not in ('waiting', 'running'): return print _('Currently syncing:'), if syncs[0]['progress'] is None: print _('progress unknown') else: pkgs_left = syncs[0]['progress']['items_left'] pkgs_total = syncs[0]['progress']['items_total'] bytes_left = float(syncs[0]['progress']['size_left']) bytes_total = float(syncs[0]['progress']['size_total']) percent = (bytes_total - bytes_left) / bytes_total print _('%d%% done (%d of %d packages downloaded)') % \ (int(percent), (pkgs_total - pkgs_left), pkgs_total)
repo = self.pconn.repository(id)
repo = self.get_repo(id)
def run(self): id = self.get_required_option('id') repo = self.pconn.repository(id) files = repo['files'] packages = self.pconn.packages(id) print_header(_('Contents of %s') % id) print _('files in %s:') % id if not files: print _(' none') else: for f in sorted(repo['files']): print ' ' + f print _('packages in %s:') % id if not packages: print _(' none') else: for p in sorted(packages, key=lambda p: p['filename']): print ' ' + p['filename']
repo = self.pconn.repository(id) if not repo: system_exit(os.EX_DATAERR, _("Repository with id: [%s] not found") % id)
repo = self.get_repo(id)
def run(self): id = self.get_required_option('id') repo = self.pconn.repository(id) if not repo: system_exit(os.EX_DATAERR, _("Repository with id: [%s] not found") % id) optdict = vars(self.opts) for k, v in optdict.items(): if not v: continue method = self.find(k) if method: # special method stale = method(repo, v) if stale: repo = self.pconn.repository(id) continue if k in repo: repo[k] = v self.pconn.update(repo) print _("Successfully updated repository [ %s ]") % repo['id']
if relative_path is None and r['source'] is not None : url_parse = urlparse(str(r['source']["url"])) r['relative_path'] = url_parse.path
if relative_path is None: if r['source'] is not None : url_parse = urlparse(str(r['source']["url"])) r['relative_path'] = url_parse.path else: r['relative_path'] = r['id']
def create(self, id, name, arch, feed=None, symlinks=False, sync_schedule=None, cert_data=None, groupid=None, relative_path=None): """ Create a new Repository object and return it """ repo = self.repository(id) if repo is not None: raise PulpException("A Repo with id %s already exists" % id) self._validate_schedule(sync_schedule)
self.objectdb.remove(repo, safe=True)
self.objectdb.remove({'id' : id}, safe=True)
def delete(self, id): repo = self._get_existing_repo(id) repo_sync.delete_schedule(repo) repo_location = "%s/%s" % (config.config.get('paths', 'local_storage'), "repos") #delete any data associated to this repo for field in ['relative_path', 'cert', 'key', 'ca']: if field == 'relative_path' and repo[field]: fpath = os.path.join(repo_location, repo[field]) else: fpath = repo[field] if fpath and os.path.exists(fpath): try: if os.path.isfile(fpath): os.remove(fpath) else: # os.path.isdir(fpath): shutil.rmtree(fpath) log.error("removing repo files .... %s" % fpath) except: #file removal failed raise log.error("Unable to cleanup file %s " % fpath) continue self.objectdb.remove(repo, safe=True)
default_to_publish = config.config.get('repos', 'default_to_published')
default_to_publish = \ config.config.getboolean('repos', 'default_to_published')
def create(self, id, name, arch, feed=None, symlinks=False, sync_schedule=None, cert_data=None, groupid=None, relative_path=None, gpgkeys=[]): """ Create a new Repository object and return it """ repo = self.repository(id) if repo is not None: raise PulpException("A Repo with id %s already exists" % id) self._validate_schedule(sync_schedule)
log.debug("Skipping update of groups metadata since missing repomd file: '%s'" %
log.warn("Skipping update of groups metadata since missing repomd file: '%s'" %
def _update_groups_metadata(self, repoid): """ Updates the groups metadata (example: comps.xml) for a given repo @param repoid: repo id @return: True if metadata was successfully updated, otherwise False """ repo = self._get_existing_repo(repoid) try: # If the repomd file is not valid, or if we are missingg # a group metadata file, no point in continuing. if not os.path.exists(repo["repomd_xml_path"]): log.debug("Skipping update of groups metadata since missing repomd file: '%s'" % (repo["repomd_xml_path"])) return False xml = comps_util.form_comps_xml(repo['packagegroupcategories'], repo['packagegroups']) if repo["group_xml_path"] == "": repo["group_xml_path"] = os.path.dirname(repo["repomd_xml_path"]) repo["group_xml_path"] = os.path.join(os.path.dirname(repo["repomd_xml_path"]), "comps.xml") self.update(repo) f = open(repo["group_xml_path"], "w") f.write(xml.encode("utf-8")) f.close() if repo["group_gz_xml_path"]: gz = gzip.open(repo["group_gz_xml_path"], "wb") gz.write(xml.encode("utf-8")) gz.close() return comps_util.update_repomd_xml_file(repo["repomd_xml_path"], repo["group_xml_path"], repo["group_gz_xml_path"]) except Exception, e: log.debug("_update_groups_metadata exception caught: %s" % (e)) log.debug("Traceback: %s" % (traceback.format_exc())) return False
log.debug("_update_groups_metadata exception caught: %s" % (e)) log.debug("Traceback: %s" % (traceback.format_exc()))
log.warn("_update_groups_metadata exception caught: %s" % (e)) log.warn("Traceback: %s" % (traceback.format_exc()))
def _update_groups_metadata(self, repoid): """ Updates the groups metadata (example: comps.xml) for a given repo @param repoid: repo id @return: True if metadata was successfully updated, otherwise False """ repo = self._get_existing_repo(repoid) try: # If the repomd file is not valid, or if we are missingg # a group metadata file, no point in continuing. if not os.path.exists(repo["repomd_xml_path"]): log.debug("Skipping update of groups metadata since missing repomd file: '%s'" % (repo["repomd_xml_path"])) return False xml = comps_util.form_comps_xml(repo['packagegroupcategories'], repo['packagegroups']) if repo["group_xml_path"] == "": repo["group_xml_path"] = os.path.dirname(repo["repomd_xml_path"]) repo["group_xml_path"] = os.path.join(os.path.dirname(repo["repomd_xml_path"]), "comps.xml") self.update(repo) f = open(repo["group_xml_path"], "w") f.write(xml.encode("utf-8")) f.close() if repo["group_gz_xml_path"]: gz = gzip.open(repo["group_gz_xml_path"], "wb") gz.write(xml.encode("utf-8")) gz.close() return comps_util.update_repomd_xml_file(repo["repomd_xml_path"], repo["group_xml_path"], repo["group_gz_xml_path"]) except Exception, e: log.debug("_update_groups_metadata exception caught: %s" % (e)) log.debug("Traceback: %s" % (traceback.format_exc())) return False
help=_("Package filename to remove to this repository"))
help=_("Package filename to remove from this repository"))
def setup_parser(self): super(RemovePackages, self).setup_parser() self.parser.add_option("-p", "--package", action="append", dest="pkgname", help=_("Package filename to remove to this repository"))
help=_("Errata Id to delete to this repository"))
help=_("Errata Id to delete from this repository"))
def setup_parser(self): super(RemoveErrata, self).setup_parser() self.parser.add_option("-e", "--errata", action="append", dest="errataid", help=_("Errata Id to delete to this repository"))
self.assertTrue(False)
def test_update_delete_schedule(self): ''' Tests multiple updates to a repo's sync schedule and the case where multiple updates are created with no schedules. '''
rmd = yum.repoMDObject.RepoMD("temp_pulp", repomd_path)
rmd = yum.repoMDObject.RepoMD("temp_pulp", path)
def get_repomd_filetype_path(path, filetype): """ @param path: path to repo @param filetype: metadata type to query, example "group", "primary", etc @return: Path for filetype, or None """ rmd = yum.repoMDObject.RepoMD("temp_pulp", repomd_path) if rmd: data = rmd.getData(filetype) return data.location[1] return None
def POST(self):
def POST(self, id):
def POST(self): """ @return: True on successful update or repository meta data """ repo = self.input() API.update(repo) return self.output(True)
@return: True on successful update or repository meta data
@return: True on successful update of repository meta data
def POST(self): """ @return: True on successful update or repository meta data """ repo = self.input() API.update(repo) return self.output(True)
user = api.create(login=user_data['login'], password=user_data['password'], name=user_data['name'])
user = api.create(user_data['login'], user_data['password'], user_data['name'])
def PUT(self): """ Create a new user @return: user that was created """ user_data = self.params() user = api.create(login=user_data['login'], password=user_data['password'], name=user_data['name']) return self.created(user['id'], user)
def _raise_exception_in_thread(tid, exctype):
def _raise_exception_in_thread(tid, exc_type):
def _raise_exception_in_thread(tid, exctype): """ Raises an exception in the threads with id tid. """ if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) excptr = ctypes.py_object(exctype) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, excptr) if num == 1: return if num == 0: raise ValueError('Invalid thread id') # NOTE if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect nullptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, nullptr) raise SystemError('PyThreadState_SetAsyncExc failed')
if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)')
assert inspect.isclass(exc_type)
def _raise_exception_in_thread(tid, exctype): """ Raises an exception in the threads with id tid. """ if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) excptr = ctypes.py_object(exctype) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, excptr) if num == 1: return if num == 0: raise ValueError('Invalid thread id') # NOTE if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect nullptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, nullptr) raise SystemError('PyThreadState_SetAsyncExc failed')
excptr = ctypes.py_object(exctype) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, excptr)
long_tid = ctypes.c_long(tid) exc_ptr = ctypes.py_object(exc_type) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, exc_ptr)
def _raise_exception_in_thread(tid, exctype): """ Raises an exception in the threads with id tid. """ if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) excptr = ctypes.py_object(exctype) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, excptr) if num == 1: return if num == 0: raise ValueError('Invalid thread id') # NOTE if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect nullptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, nullptr) raise SystemError('PyThreadState_SetAsyncExc failed')
nullptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, nullptr)
null_ptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, null_ptr)
def _raise_exception_in_thread(tid, exctype): """ Raises an exception in the threads with id tid. """ if not inspect.isclass(exctype): raise TypeError('Only types can be raised (not instances)') # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) excptr = ctypes.py_object(exctype) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, excptr) if num == 1: return if num == 0: raise ValueError('Invalid thread id') # NOTE if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect nullptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, nullptr) raise SystemError('PyThreadState_SetAsyncExc failed')
def raise_exception(self, exctype):
def raise_exception(self, exc_type):
def _tid(self): """ Determine this thread's id.
Raises the given exception type in the context of this thread. If the thread is busy in a system call (time.sleep(), socket.accept(), ...) the exception is simply ignored. If you are sure that your exception should terminate the thread, one way to ensure that it works is: t = InterruptableThread(...) ... t.raise_exception(SomeException) while t.isAlive(): time.sleep(0.1) t.raise_exception(SomeException) If the exception is to be caught by the thread, you need a way to check that your thread has caught it. CAREFUL : this method is executed in the context of the caller thread, to raise an exception in the context of the thread represented by this instance.
def raise_exception(self, exctype): """ Raises the given exception type in the context of this thread.
_raise_exception_in_thread(self._tid, exctype)
try: while self.is_alive(): _raise_exception_in_thread(self._tid, exc_type) self.join(self._default_timeout) except threading.ThreadError: pass
def raise_exception(self, exctype): """ Raises the given exception type in the context of this thread.
_default_sleep = 0.0005 def _ensure_exception(self, exctype): """ Ensure that the exception gets raised in the thread or that the thread is already dead. @type exctype: type or class @param exctype: type or class of exception to raise in the tread """ try: while self.is_alive(): self.raise_exception(exctype) time.sleep(self._default_sleep) except threading.ThreadError: pass
def raise_exception(self, exctype): """ Raises the given exception type in the context of this thread.
self._ensure_exception(TimeoutException)
self.raise_exception(TimeoutException)
def timeout(self): """ Raise a TimeoutException in the thread. """ self._ensure_exception(TimeoutException)
help="common repository name")
help="Common repository name")
def generate_options(self):
help="package arch the repo should support.")
help="Package arch the repo should support.")
def generate_options(self):
if "updateinfo" in ftypes and not skip.has_key('errata') or skip['errata'] != 1:
if "updateinfo" in ftypes and (not skip.has_key('errata') or skip['errata'] != 1):
def add_packages_from_dir(self, dir, repo, skip={}): added_packages = {} added_errataids = [] if not skip.has_key('packages') or skip['packages'] != 1: startTime = time.time() log.debug("Begin to add packages from %s into %s" % (dir, repo['id'])) package_list = pulp.server.util.get_repo_packages(dir) log.debug("Processing %s potential packages" % (len(package_list))) for package in package_list: package = self.import_package(package, repo, repo_defined=True) if (package is not None): added_packages[package["id"]] = package endTime = time.time() log.debug("Repo: %s read [%s] packages took %s seconds" % (repo['id'], len(added_packages), endTime - startTime)) else: log.info("Skipping package imports from sync process") if not skip.has_key('distribution') or skip['distribution'] != 1: # process kickstart files/images part of the repo self._process_repo_images(dir, repo) else: log.info("skipping distribution imports from sync process") # Import groups metadata if present repomd_xml_path = os.path.join(dir.encode("ascii", "ignore"), 'repodata/repomd.xml') if os.path.isfile(repomd_xml_path): repo["repomd_xml_path"] = repomd_xml_path ftypes = pulp.server.util.get_repomd_filetypes(repomd_xml_path) log.debug("repodata has filetypes of %s" % (ftypes)) if "group" in ftypes: group_xml_path = pulp.server.util.get_repomd_filetype_path(repomd_xml_path, "group") group_xml_path = os.path.join(dir.encode("ascii", "ignore"), group_xml_path) if os.path.isfile(group_xml_path): groupfile = open(group_xml_path, "r") repo['group_xml_path'] = group_xml_path self.sync_groups_data(groupfile, repo) log.info("Loaded group info from %s" % (group_xml_path)) else: log.info("Group info not found at file: %s" % (group_xml_path)) if "group_gz" in ftypes: group_gz_xml_path = pulp.server.util.get_repomd_filetype_path( repomd_xml_path, "group_gz") group_gz_xml_path = os.path.join(dir.encode("ascii", "ignore"), group_gz_xml_path) repo['group_gz_xml_path'] = group_gz_xml_path if "updateinfo" in ftypes and not skip.has_key('errata') or skip['errata'] != 1: updateinfo_xml_path = pulp.server.util.get_repomd_filetype_path( repomd_xml_path, "updateinfo") updateinfo_xml_path = os.path.join(dir.encode("ascii", "ignore"), updateinfo_xml_path) log.info("updateinfo is found in repomd.xml, it's path is %s" % \ (updateinfo_xml_path)) added_errataids = self.sync_updateinfo_data(updateinfo_xml_path, repo) log.debug("Loaded updateinfo from %s for %s" % \ (updateinfo_xml_path, repo["id"])) else: log.info("Skipping errata imports from sync process") return added_packages, added_errataids
log.debug("%s" % (traceback.format_exc()))
log.error("%s" % (traceback.format_exc()))
def import_package(self, package, repo): try: retval = None file_name = package.relativepath hashtype = "sha256" checksum = package.checksum found = self.package_api.packages(name=package.name, epoch=package.epoch, version=package.version, release=package.release, arch=package.arch, filename=file_name, checksum_type=hashtype, checksum=checksum) if len(found) == 1: retval = found[0] else: retval = self.package_api.create(package.name, package.epoch, package.version, package.release, package.arch, package.description, hashtype, checksum, file_name) for dep in package.requires: retval.requires.append(dep[0]) for prov in package.provides: retval.provides.append(prov[0]) retval.download_url = config.config.get('server', 'base_url') + "/" + \ config.config.get('server', 'relative_url') + "/" + \ repo["id"] + "/" + file_name self.package_api.update(retval) return retval except Exception, e: log.error("error reading package %s" % (file_name)) log.debug("%s" % (traceback.format_exc()))
self.parser.add_option("--name", dest="name", help="Consumer group name")
self.parser.add_option("--id", dest="id", help="Consumer group id")
def generate_options(self):
self.options.label)
self.options.id)
def _delete(self): (self.options, self.args) = self.parser.parse_args() if not self.options.id: print("Group id required. Try --help") sys.exit(0) try: self.cgconn.delete(id=self.options.id) print _(" Successful deleted Consumer Group [ %s ] " % self.options.id) except RestlibException, re: print _(" Deleted operation failed Consumer Group [ %s ] " % \ self.options.label) log.error("Error: %s" % re) sys.exit(-1) except Exception, e: print _(" Deleted operation failed on Consumer Group [ %s ]. " % \ self.options.label) log.error("Error: %s" % e) sys.exit(-1)
repo_source.url)
repo_source['url'])
def sync(self, repo, repo_source): # Parse the repo source for necessary pieces # Expected format: <server>/<channel> pieces = repo_source['url'].split('/') if len(pieces) < 2: raise PulpException('Feed format for RHN type must be <server>/<channel>. Feed: %s', repo_source.url)
self.repo_api.create('repo-1', 'repo-1', 'i386', 'yum:localhost', '1 * * * *') self.repo_api.create('repo-2', 'repo-2', 'i386', 'yum:localhost', '2 * * * *') self.repo_api.create('repo-3', 'repo-3', 'i386', 'yum:localhost', None)
self.repo_api.create('repo-1', 'repo-1', 'i386', 'yum:localhost', sync_schedule='1 * * * *') self.repo_api.create('repo-2', 'repo-2', 'i386', 'yum:localhost', sync_schedule='2 * * * *') self.repo_api.create('repo-3', 'repo-3', 'i386', 'yum:localhost', sync_schedule=None)
def test_all_schedules(self): # Setup self.repo_api.create('repo-1', 'repo-1', 'i386', 'yum:localhost', '1 * * * *') self.repo_api.create('repo-2', 'repo-2', 'i386', 'yum:localhost', '2 * * * *') self.repo_api.create('repo-3', 'repo-3', 'i386', 'yum:localhost', None) self.repo_api.create('repo-4', 'repo-4', 'i386', 'yum:localhost')
if now - task.start_time < self.timeout:
if task.start_time is None or now - task.start_time < self.timeout:
def _timeout_tasks(self): """ Stop tasks that have met or exceeded the queue's timeout length. """ if self.timeout is None: return running_tasks = self.__storage.running_tasks() if not running_tasks: return now = datetime.now() for task in running_tasks: if now - task.start_time < self.timeout: continue thread = self.__threads[task] thread.timeout() while task.state not in task_complete_states: time.sleep(self._default_sleep) task.timeout()
while task.state not in task_complete_states: time.sleep(self._default_sleep)
self._wait_for_task(task)
def _timeout_tasks(self): """ Stop tasks that have met or exceeded the queue's timeout length. """ if self.timeout is None: return running_tasks = self.__storage.running_tasks() if not running_tasks: return now = datetime.now() for task in running_tasks: if now - task.start_time < self.timeout: continue thread = self.__threads[task] thread.timeout() while task.state not in task_complete_states: time.sleep(self._default_sleep) task.timeout()
while task.state not in task_complete_states: time.sleep(self._default_sleep)
self._wait_for_task(task)
def cancel(self, task): self.__lock.acquire() try: thread = self.__threads[task] thread.cancel() while task.state not in task_complete_states: time.sleep(self._default_sleep) task.cancel() finally: self.__lock.release()
(con["id"], con["description"], con["repoids"], con["package_profile"], con["key_value_pairs"])
(con["id"], con["description"], \ con["repoids"],con["key_value_pairs"])
def run(self): key = self.opts.key value = self.opts.value cons = self.cconn.consumers() baseurl = "%s://%s:%s" % (_cfg.server.scheme, _cfg.server.host, _cfg.server.port) for con in cons: con['package_profile'] = urlparse.urljoin(baseurl, con['package_profile']) if key is None: print_header(_("Consumer Information")) for con in cons: print constants.AVAILABLE_CONSUMER_INFO % \ (con["id"], con["description"], con["repoids"], con["package_profile"], con["key_value_pairs"]) system_exit(os.EX_OK)
super(Create, self).setup_parser()
self.parser.add_option('--id', dest='id', help=_("consumer identifier eg: foo.example.com (required)"))
def setup_parser(self): super(Create, self).setup_parser() self.parser.add_option("--description", dest="description", help=_("consumer description eg: foo's web server"))
if os.path.isdir(fn): continue
def keyfiles(self): """ Get a list of GPG key files at the specified I{path}. @param path: An absolute path to a file containing a GPG key. @type path: str @return: A list of tuples: (key-path, key-content) @rtype: list """ keys = [] pattern = '----BEGIN PGP PUBLIC KEY BLOCK-----' path = os.path.join(keydir(), self.path) for fn in os.listdir(path): if os.path.isdir(fn): continue for ext in ('.rpm','.gz','.xml'): if fn.endswith(ext): continue try: fp = os.path.join(path, fn) f = open(fp) content = f.read() if pattern in content: keys.append((fp, content)) f.close() except: log.error(fp, exc_info=True) return keys
self.parser.add_option('--username', dest='username',
self.parser.add_option('-u', '--username', dest='username',
def setup_parser(self): self.parser.add_option('--username', dest='username', help=_('pulp account username')) self.parser.add_option('--password', dest='password', help=_('pulp account password'))
self.parser.add_option('--password', dest='password',
self.parser.add_option('-p', '--password', dest='password',
def setup_parser(self): self.parser.add_option('--username', dest='username', help=_('pulp account username')) self.parser.add_option('--password', dest='password', help=_('pulp account password'))
def _print_sync_finsih(self, state, progress):
def _print_sync_finish(self, state, progress):
def _print_sync_finsih(self, state, progress): self._print_sync_progress(progress) print '' print _('Sync: %s') % state.title()
def test_loadConfig(self):
def test_load_config(self):
def test_loadConfig(self): # Setup origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' # Test & Verify config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp')
origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini'
orig_file = os.path.abspath(os.path.dirname(__file__)) + '/../../etc/pulp/pulp.ini' override_file = os.path.abspath(os.path.dirname(__file__)) + '/../common/test-override-pulp.ini'
def test_loadConfig(self): # Setup origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' # Test & Verify config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp')
config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp')
config = loadConfig(orig_file) self.assertEqual(config.get('paths', 'local_storage'), '/var/lib/pulp')
def test_loadConfig(self): # Setup origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' # Test & Verify config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp')
config = loadConfig(overrideFile, config=config) assert(config.get('paths', 'http_mount') == '/tmp/pulp')
config = loadConfig(override_file, config=config) self.assertEqual(config.get('paths', 'local_storage'), '/tmp/pulp')
def test_loadConfig(self): # Setup origFile = '../../etc/pulp.ini' overrideFile = './data/test-override-pulp.ini' # Test & Verify config = loadConfig(origFile) self.assertEqual(config.get('paths', 'http_mount'), '/var/www/pulp')
help=_("schedule for automatically synchronizing the repository"))
help=_("cron entry date and time syntax for scheduling automatic repository synchronizations"))
def setup_parser(self): super(Create, self).setup_parser() self.parser.add_option("--name", dest="name", help=_("common repository name")) self.parser.add_option("--arch", dest="arch", help=_("package arch the repository should support")) self.parser.add_option("--feed", dest="feed", help=_("url feed to populate the repository")) self.parser.add_option("--cacert", dest="cacert", help=_("path location to ca certificate")) self.parser.add_option("--cert", dest="cert", help=_("path location to entitlement certificate")) self.parser.add_option("--key", dest="key", help=_("path location to entitlement certificate key")) self.parser.add_option("--schedule", dest="schedule", help=_("schedule for automatically synchronizing the repository")) self.parser.add_option("--symlinks", action="store_true", dest="symlinks", help=_("use symlinks instead of copying bits locally; applicable for local syncs")) self.parser.add_option("--relativepath", dest="relativepath", help=_("relative path where the repository is stored and exposed to clients; this defaults to feed path if not specified")) self.parser.add_option("--groupid", action="append", dest="groupid", help=_("a group to which the repository belongs; this is just a string identifier")) self.parser.add_option("--keys", dest="keys", help=_("a ',' separated list of directories and/or files contining GPG keys"))
help=_("schedule for automatically synchronizing the repository"))
help=_("cron entry date and time syntax for scheduling automatic repository synchronizations"))
def setup_parser(self): super(Update, self).setup_parser() self.parser.add_option("--name", dest="name", help=_("common repository name")) self.parser.add_option("--arch", dest="arch", help=_("package arch the repository should support")) self.parser.add_option("--feed", dest="feed", help=_("url feed to populate the repository")) self.parser.add_option("--cacert", dest="cacert", help=_("path location to ca certificate")) self.parser.add_option("--cert", dest="cert", help=_("path location to entitlement certificate key")) self.parser.add_option("--schedule", dest="sync_schedule", help=_("schedule for automatically synchronizing the repository")) self.parser.add_option("--symlinks", action="store_true", dest="symlinks", help=_("use symlinks instead of copying bits locally; applicable for local syncs")) self.parser.add_option("--relativepath", dest="relativepath", help=_("relative path where the repository is stored and exposed to clients; this defaults to feed path if not specified")) self.parser.add_option("--groupid", dest="groupid", help=_("a group to which the repository belongs; this is just a string identifier")) self.parser.add_option("--addkeys", dest="addkeys", help=_("a ',' separated list of directories and/or files contining GPG keys")) self.parser.add_option("--rmkeys", dest="rmkeys", help=_("a ',' separated list of GPG key names"))
while not isinstance(task.exception, TimeoutException):
while task.state not in task_complete_states:
def _timeout_tasks(self): """ """ if self.timeout is None: return running_tasks = self.__storage.running_tasks() if not running_tasks: return now = datetime.now() for task in running_tasks: if now - task.start_time < self.timeout: continue thread = self.__threads[task] # this will cause a deadlock because we are holding the lock and the # task needs to call self.complete which tries to grab the lock and # thread.timeout waits for the task! (actually we don't wait for the # task, so there may not be a problem) thread.timeout() while not isinstance(task.exception, TimeoutException): time.sleep(0.0005) task.timeout()
while not isinstance(task.exception, CancelException):
while task.state not in task_complete_states:
def cancel(self, task): self.__lock.acquire() try: thread = self.__threads[task] thread.cancel() while not isinstance(task.exception, CancelException): time.sleep(0.0005) task.cancel() finally: self.__lock.release()
self.rapi.add_package_to_group(repo["id"], pkggroup["id"],
self.rapi.add_packages_to_group(repo["id"], pkggroup["id"],
def test_repo_package_groups(self): repo = self.rapi.create('some-id','some name', \ 'i386', 'yum:http://example.com') pkggroup = self.rapi.create_packagegroup(repo["id"], 'test-group-id', 'test-group-name', 'test-group-description') package = self.create_package('test_repo_packages') self.rapi.add_package(repo["id"], package["id"]) self.rapi.add_package_to_group(repo["id"], pkggroup["id"], package["name"], gtype="default") # Verify package is present in group found = self.rapi.repository('some-id') self.assertTrue(found['packagegroups'] is not None) self.assertTrue(pkggroup['id'] in found['packagegroups']) self.assertTrue(package["name"] in \ found['packagegroups'][pkggroup['id']]['default_package_names']) # Remove package from package group self.rapi.delete_package_from_group(repo["id"], pkggroup["id"], package["name"], gtype="default") found = self.rapi.repository('some-id') self.assertTrue(found['packagegroups'] is not None) self.assertTrue(pkggroup['id'] in found['packagegroups']) self.assertTrue(package["name"] not in \ found['packagegroups'][pkggroup['id']]['default_package_names']) # Remove packagegroup from repo self.rapi.delete_packagegroup(repo["id"], pkggroup["id"]) found = self.rapi.repository('some-id') self.assertTrue(len(found['packagegroups']) == 0)
print _("\nPackage install failed")
system_exit(-1, _("\nPackage install failed"))
def run(self): consumerid = self.opts.consumerid consumergroupid = self.opts.consumergroupid if not (consumerid or consumergroupid): system_exit(os.EX_USAGE, _("Consumer or consumer group id required. try --help")) pnames = self.opts.pnames if not pnames: system_exit(os.EX_DATAERR, _("Nothing to upload.")) if consumergroupid: task = self.cgconn.installpackages(consumergroupid, pnames) else: task = self.cconn.installpackages(consumerid, pnames) print _('Created task id: %s') % task['id'] state = None spath = task['status_path'] while state not in ('finished', 'error', 'canceled', 'timed_out'): sys.stdout.write('.') sys.stdout.flush() time.sleep(2) status = self.cconn.task_status(spath) state = status['state'] if state == 'finished': print _('\n[%s] installed on %s') % \ (status['result'], (consumerid or consumergroupid)) else: print _("\nPackage install failed")
return self.objectdb.find_one({'id': id}, fields=fields)
consumers = list(self.objectdb.find(spec={'id': id}, fields=fields)) if not consumers: return None return consumers[0]
def consumer(self, id, fields=None): """ Return a single Consumer object """ return self.objectdb.find_one({'id': id}, fields=fields)
pulp_handler = logging.handlers.RotatingFileHandler(pulp_file, maxBytes=max_size, backupCount=backups)
pulp_handler = handlers.RotatingFileHandler(pulp_file, maxBytes=max_size, backupCount=backups)
def configure_pulp_grinder_logging(): """ Pull the log file configurations from the global config and/or default config and initialize the top-level logging for both pulp and grinder. """ level_name = config.config.get('logs', 'level').upper() level = getattr(logging, level_name, logging.INFO) max_size = config.config.getint('logs', 'max_size') backups = config.config.getint('logs', 'backups') fmt = '%(asctime)s [%(levelname)s][%(threadName)s] %(funcName)s() @ %(filename)s:%(lineno)d - %(message)s' formatter = logging.Formatter(fmt) pulp_file = config.config.get('logs', 'pulp_file') check_log_file(pulp_file) pulp_logger = logging.getLogger('pulp') pulp_logger.setLevel(level) pulp_handler = logging.handlers.RotatingFileHandler(pulp_file, maxBytes=max_size, backupCount=backups) pulp_handler.setFormatter(formatter) pulp_logger.addHandler(pulp_handler) grinder_file = config.config.get('logs', 'grinder_file') check_log_file(grinder_file) grinder_logger = logging.getLogger('grinder') grinder_logger.setLevel(level) grinder_handler = logging.handlers.RotatingFileHandler(grinder_file, maxBytes=max_size, backupCount=backups) grinder_handler.setFormatter(formatter) grinder_logger.addHandler(grinder_handler)
grinder_handler = logging.handlers.RotatingFileHandler(grinder_file, maxBytes=max_size, backupCount=backups)
grinder_handler = handlers.RotatingFileHandler(grinder_file, maxBytes=max_size, backupCount=backups)
def configure_pulp_grinder_logging(): """ Pull the log file configurations from the global config and/or default config and initialize the top-level logging for both pulp and grinder. """ level_name = config.config.get('logs', 'level').upper() level = getattr(logging, level_name, logging.INFO) max_size = config.config.getint('logs', 'max_size') backups = config.config.getint('logs', 'backups') fmt = '%(asctime)s [%(levelname)s][%(threadName)s] %(funcName)s() @ %(filename)s:%(lineno)d - %(message)s' formatter = logging.Formatter(fmt) pulp_file = config.config.get('logs', 'pulp_file') check_log_file(pulp_file) pulp_logger = logging.getLogger('pulp') pulp_logger.setLevel(level) pulp_handler = logging.handlers.RotatingFileHandler(pulp_file, maxBytes=max_size, backupCount=backups) pulp_handler.setFormatter(formatter) pulp_logger.addHandler(pulp_handler) grinder_file = config.config.get('logs', 'grinder_file') check_log_file(grinder_file) grinder_logger = logging.getLogger('grinder') grinder_logger.setLevel(level) grinder_handler = logging.handlers.RotatingFileHandler(grinder_file, maxBytes=max_size, backupCount=backups) grinder_handler.setFormatter(formatter) grinder_logger.addHandler(grinder_handler)
handler = logging.handlers.TimedRotatingFileHandler(file, when=units, interval=lifetime, backupCount=backups)
handler = handlers.TimedRotatingFileHandler(file, when=units, interval=lifetime, backupCount=backups)
def configure_audit_logging(): """ Pull the audit logging configuration from the global config and/or default config and initialize pulp's audit logging. """ file = config.config.get('auditing', 'events_file') check_log_file(file) units = 'D' backups = config.config.getint('auditing', 'backups') lifetime = config.config.getint('auditing', 'lifetime') # the logging module will get into an infinite loop if the interval is 0 if lifetime <= 0: units = 'H' lifetime = 1 # NOTE, this cannot be a descendant of the pulp log as it will inherit # pulp's rotating log and handler and log to both files. Yes, I've tried # removing the handler to no avail... logger = logging.getLogger('auditing') logger.setLevel(logging.INFO) handler = logging.handlers.TimedRotatingFileHandler(file, when=units, interval=lifetime, backupCount=backups) logger.addHandler(handler)
if not started: configure_pulp_grinder_logging() configure_audit_logging() started = True
if started: return configure_pulp_grinder_logging() configure_audit_logging() started = True
def start_logging(): """ Convenience function to start pulp's different logging mechanisms. """ assert config.config is not None global started if not started: configure_pulp_grinder_logging() configure_audit_logging() started = True
if started: logging.shutdown() started = False
if not started: return logging.shutdown() logging.Logger.manager.loggerDict = {} started = False
def stop_logging(): """ Convenience function to stop pulp's different logging mechanisms. """ global started if started: logging.shutdown() started = False
print _('repository: %s') % repo['id'] print _('number of packages: %d') % repo['package_count'] last_sync = 'never' if repo['last_sync'] is None else str(repo['last_sync']) print _('last sync: %s') % last_sync
print _(' repository: %s') % repo['id'] print _(' number of packages: %d') % repo['package_count'] last_sync = repo['last_sync'] if last_sync is None: last_sync = 'never' else: last_sync = str(parse_date(last_sync)) print _(' last sync: %s') % last_sync
def run(self): id = self.get_required_option('id') repo = self.pconn.repository(id) syncs = self.pconn.sync_list(id) print _('repository: %s') % repo['id'] print _('number of packages: %d') % repo['package_count'] last_sync = 'never' if repo['last_sync'] is None else str(repo['last_sync']) print _('last sync: %s') % last_sync if syncs and syncs[0]['state'] in ('waiting', 'running'): print _('currently syncing:'), if syncs[0]['progress'] is None: print _('progress unknown') else: pkgs_left = syncs[0]['progress']['items_left'] pkgs_total = syncs[0]['progress']['items_total'] bytes_left = float(syncs[0]['progress']['size_left']) bytes_total = float(syncs[0]['progress']['size_total']) percent = (bytes_total - bytes_left) / bytes_total print _('%d%% done (%d of %d packages downloaded)') % \ (int(percent), (pkgs_total - pkgs_left), pkgs_total)
"system-config-boot", gtype="default")
"pulp-test-package", gtype="default")
def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out
["newPackage"], gtype="default")
["pulp-test-package"], gtype="default")
def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out
["test_package_name"], gtype="default")
["pulp-test-package"], gtype="default")
def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out
self.assertTrue("test_package_name" in found["default_package_names"])
self.assertTrue("pulp-test-package" in found["default_package_names"])
def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out
"test_package_name", gtype="default")
"pulp-test-package", gtype="default")
def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out
self.assertTrue("test_package_name" not in found["default_package_names"])
self.assertTrue("pulp-test-package" not in found["default_package_names"])
def immutable_groups(self): #TODO until we fix group import, this tests needs to be commented out
log.info("No valid server found, default to localhost")
LOG.info("No valid server found, default to localhost")
def check_user_pass_on_ldap(self, username, password=None): ''' verify the credentials for user on ldap server. @param username: Userid to be validated on ldap server @param password: password credentials for userid @return: user instance of the authenticated user if valid credentials were specified; None otherwise @rtype: L{pulp.server.db.model.User} ''' if not config.has_section("ldap"): LOG.info("No external ldap server available") return try: ldapserver = config.get("ldap", "uri") except: log.info("No valid server found, default to localhost") ldapserver = "ldap://localhost" try: base = config.get("ldap", "base") except: log.info("No valid base found, default to localhost") base = "dc=localhost" ldapserv = LDAPConnection(ldapserver) ldapserv.connect() if password: status = ldapserv.authenticate_user(base, username, password) else: status = ldapserv.lookup_user(base, username)
log.info("No valid base found, default to localhost")
LOG.info("No valid base found, default to localhost")
def check_user_pass_on_ldap(self, username, password=None): ''' verify the credentials for user on ldap server. @param username: Userid to be validated on ldap server @param password: password credentials for userid @return: user instance of the authenticated user if valid credentials were specified; None otherwise @rtype: L{pulp.server.db.model.User} ''' if not config.has_section("ldap"): LOG.info("No external ldap server available") return try: ldapserver = config.get("ldap", "uri") except: log.info("No valid server found, default to localhost") ldapserver = "ldap://localhost" try: base = config.get("ldap", "base") except: log.info("No valid base found, default to localhost") base = "dc=localhost" ldapserv = LDAPConnection(ldapserver) ldapserv.connect() if password: status = ldapserv.authenticate_user(base, username, password) else: status = ldapserv.lookup_user(base, username)
repo = self.input() API.update(repo)
repo_data = self.input() repo_data['id'] = id API.update(repo_data)
def POST(self, id): """ @return: True on successful update of repository meta data """ repo = self.input() API.update(repo) return self.output(True)
return super_user_role in user.roles
return super_user_role in user['roles']
def is_superuser(user): """ Return True if the user is a super user @type user: L{pulp.server.db.model.User} instance @param user: user to check @rtype: bool @return: True if the user is a super user, False otherwise """ return super_user_role in user.roles
path = http.extend_uri_path(repo.id)
path = http.extend_uri_path(repo["id"])
def PUT(self): """ Create a new repository. @return: repository meta data on successful creation of repository """ repo_data = self.params()
return - ('no description available')
return _('no description available')
def description(self): """ Return a string showing the command's description """ return - ('no description available')
"""
def description(self): """ Return a string for this action's description return _('no description available') """
return web.ctx.path
return web.http.url(web.ctx.path)
def _status_path(self, id): """ Construct a URL path that can be used to poll a task's status A status path is constructed as follows: /<collection>/<object id>/<action>/<action id>/ A GET request sent to this path will get a JSON encoded status object """ parts = web.ctx.path.split('/') if parts[-2] == id: return web.ctx.path return self.extend_path(id)
user = self.userconn.create(login=self.options.newusername, password=self.options.newpassword, name=self.options.name)
user = self.userconn.create(self.options.newusername, self.options.newpassword, self.options.name)
def _create(self): if not self.options.newusername: print("newusername required. Try --help") sys.exit(0) if not self.options.name: self.options.name = "" if not self.options.newpassword: self.options.newpassword = "" try: user = self.userconn.create(login=self.options.newusername, password=self.options.newpassword, name=self.options.name) print _(" Successfully created User [ %s ] with name [ %s ]" % \ (user['login'], user["name"])) except RestlibException, re: log.error("Error: %s" % re) systemExit(re.code, re.msg) except Exception, e: log.error("Error: %s" % e) raise
data = self.input()
data = self.params()
def installpackages(self, id): """ Install packages. Body contains a list of package names. """ data = self.input() names = data.get('packagenames', []) return self.ok(API.installpackages(id, names))
def test_query_invalid_consumer_id(self):
def disabled_query_invalid_consumer_id(self):
def test_query_invalid_consumer_id(self): # Test self.assertRaises(PulpException, self.consumer_history_api.query, consumer_id='foo')
self.consumer_history_api.consumer_created(1) self.consumer_history_api.consumer_created(2, originator='admin1')
def _populate_for_queries(self): ''' Populates the history store with a number of entries to help test the query functionality. '''
def __init__(self, host, port, apihandler, apiprefix='/pulp/api', cert_file=None, key_file=None, username=None, password=None):
def __init__(self, host, port, apihandler, cert_file=None, key_file=None, username=None, password=None):
def __init__(self, host, port, apihandler, apiprefix='/pulp/api', cert_file=None, key_file=None, username=None, password=None): self.host = host # ensure we have an integer, httpslib is picky about the type # passed in for the port self.port = int(port) self.apihandler = ''.join((apiprefix, apihandler)) self.username = username self.password = password if (self.username != None): raw = "%s:%s" % (self.username, self.password) base64string = base64.encodestring(raw)[:-1] auth = "Basic %s" % base64string else: auth = None self.headers = {"Content-type":"application/json", "Authorization": auth, "Accept": "application/json", "Accept-Language": locale.getdefaultlocale()[0].lower().replace('_', '-')} self.cert_file = cert_file self.key_file = key_file
self.apihandler = ''.join((apiprefix, apihandler))
self.apihandler = apihandler
def __init__(self, host, port, apihandler, apiprefix='/pulp/api', cert_file=None, key_file=None, username=None, password=None): self.host = host # ensure we have an integer, httpslib is picky about the type # passed in for the port self.port = int(port) self.apihandler = ''.join((apiprefix, apihandler)) self.username = username self.password = password if (self.username != None): raw = "%s:%s" % (self.username, self.password) base64string = base64.encodestring(raw)[:-1] auth = "Basic %s" % base64string else: auth = None self.headers = {"Content-type":"application/json", "Authorization": auth, "Accept": "application/json", "Accept-Language": locale.getdefaultlocale()[0].lower().replace('_', '-')} self.cert_file = cert_file self.key_file = key_file
def __init__(self, host='localhost', port=443, handler="", cert_file=None, key_file=None,
def __init__(self, host='localhost', port=443, handler="/pulp/api", cert_file=None, key_file=None,
def __init__(self, host='localhost', port=443, handler="", cert_file=None, key_file=None, username=None, password=None): self.host = host self.port = port self.handler = handler self.conn = None self.cert_file = cert_file self.key_file = key_file self.username = username self.password = password # initialize connection self.setUp()
self.conn = Restlib(self.host, self.port, self.handler, cert_file=self.cert_file, key_file=self.key_file, username=self.username, password=self.password)
self.conn = Restlib(self.host, self.port, self.handler, self.cert_file, self.key_file, self.username, self.password)
def setUp(self): self.conn = Restlib(self.host, self.port, self.handler, cert_file=self.cert_file, key_file=self.key_file, username=self.username, password=self.password) log.info("Connection Established for cli: Host: %s, Port: %s, handler: %s" % (self.host, self.port, self.handler)) log.info("Using cert_file: %s and key_file: %s" % (self.cert_file, self.key_file))
return json.dumps("Authorization Failure. Check your username and password or your Certificate",
return json.dumps("Authorization Failure. Check your username and password or your certificate",
def check_roles(*fargs, **kw): ''' Strip off the decorator arguments so we can use those to check the Roles of the current caller.
dirList = os.listdir(self.config.get('paths', 'local_storage') + '/' + repo['id'])
dirList = os.listdir(self.config.get('paths', 'local_storage') + '/repos/' + repo['id'])
def test_sync(self): repo = self.rapi.create('some-id','some name', 'i386', 'yum:http://mmccune.fedorapeople.org/pulp/') failed = False try: self.rapi.sync('invalid-id-not-found') except Exception: failed = True assert(failed) self.rapi.sync(repo['id']) # Check that local storage has dir and rpms dirList = os.listdir(self.config.get('paths', 'local_storage') + '/' + repo['id']) assert(len(dirList) > 0) found = self.rapi.repository(repo['id']) packages = found['packages'] assert(packages is not None) assert(len(packages) > 0)
key = self.opts.key
key = getattr(self.opts, 'key', None)
def _get_cert_options(self): cacert = self.opts.cacert cert = self.opts.cert key = self.opts.key if not (cacert and cert and key): return None return {"ca": utils.readFile(cacert), "cert": utils.readFile(cert), "key": utils.readFile(key)}
msgFile = os.fdopen(fd, 'w')
msgFile = open(filename, 'w')
def writeToFile(filename, message, overwrite=True): dir_name = os.path.dirname(filename) if not os.access(dir_name, os.W_OK): os.mkdir(dir_name) if os.access(filename, os.F_OK) and not overwrite: # already have file there; let's back it up try: os.rename(filename, filename + '.save') except: return False fd = os.open(filename, os.O_WRONLY | os.O_CREAT, 0644) msgFile = os.fdopen(fd, 'w') try: msgFile.write(message) finally: msgFile.close() fd.close() return True
usage = "usage: %prog consumer [OPTIONS]"
usage = "consumer [OPTIONS]"
def __init__(self, is_admin=True, actions=None): usage = "usage: %prog consumer [OPTIONS]" shortdesc = "consumer specific actions to pulp server." desc = "" self.name = "consumer" self.actions = actions or {"delete" : "Delete the consumer", "update" : "Update consumer profile", "list" : "List of accessible consumer info", "bind" : "Bind the consumer to listed repos", "unbind" : "Unbind the consumer from repos",} self.is_admin = is_admin BaseCore.__init__(self, "consumer", usage, shortdesc, desc) self.cconn = None
spe.call('GiveNamedItem', pPlayer, str(item_name)) return True
return spe.call('GiveNamedItem', pPlayer, str(item_name), 0)
def giveNamedItem( userid, item_name ): # Get the player instance pPlayer = spe.getPlayer(int(userid)) # Is the player instance valid? if not pPlayer: # Return False since the player instance was not valid return False # Give the player the item spe.call('GiveNamedItem', pPlayer, str(item_name)) return True
return False
return None
def giveNamedItem( userid, item_name ): # Get the player instance pPlayer = spe.getPlayer(int(userid)) # Is the player instance valid? if not pPlayer: # Return False since the player instance was not valid return False # Give the player the item return spe.call('GiveNamedItem', pPlayer, str(item_name), 0)
pEntity = entityByIndex( int(entity_index) )
pEntity = spe.getEntityOfIndex( int(entity_index) )
def removeEntityByIndex( entity_index ): # Get entity instance pEntity = entityByIndex( int(entity_index) ) # Make sure it's valid if not pEntity: # Return false if the entity was None. return False # Remove it! spe.call("Remove", pEntity) return True
pEntity = entityByIndex( int(entity_index) )
pEntity = spe.getEntityOfIndex( int(entity_index) )
def setStringKeyvalue( entity_index, keyvalue_name, new_value ):
    # Get entity instance
    pEntity = entityByIndex( int(entity_index) )
    # Make sure the entity is valid
    if not pEntity:
        # Return False if the entity was None.
        return False
    # Set the keyvalue
    spe.call("setkv_string", pEntity, keyvalue_name, new_value)
    return True
state = 'IDLE'
IDLE, START, ADDRESS, DATA = range(4)
state = IDLE
def sigrokdecode_i2c(inbuf): """I2C protocol decoder""" # FIXME: This should be passed in as metadata, not hardcoded here. signals = (2, 5) channels = 8 o = wr = ack = d = '' bitcount = data = 0 state = 'IDLE' # Get the bit number (and thus probe index) of the SCL/SDA signals. scl_bit, sda_bit = signals # Get SCL/SDA bit values (0/1 for low/high) of the first sample. s = ord(inbuf[0]) oldscl = (s & (1 << scl_bit)) >> scl_bit oldsda = (s & (1 << sda_bit)) >> sda_bit # Loop over all samples. # TODO: Handle LAs with more/less than 8 channels. for samplenum, s in enumerate(inbuf[1:]): # We skip the first byte... s = ord(s) # FIXME # Get SCL/SDA bit values (0/1 for low/high). scl = (s & (1 << scl_bit)) >> scl_bit sda = (s & (1 << sda_bit)) >> sda_bit # TODO: Wait until the bus is idle (SDA = SCL = 1) first? # START condition (S): SDA = falling, SCL = high if (oldsda == 1 and sda == 0) and scl == 1: o += "%d\t\tSTART\n" % samplenum state = 'ADDRESS' bitcount = data = 0 # Data latching by transmitter: SCL = low elif (scl == 0): pass # TODO # Data sampling of receiver: SCL = rising elif (oldscl == 0 and scl == 1): bitcount += 1 # o += "%d\t\tRECEIVED BIT %d: %d\n" % \ # (samplenum, 8 - bitcount, sda) # Address and data are transmitted MSB-first. data <<= 1 data |= sda if bitcount != 9: continue # We received 8 address/data bits and the ACK/NACK bit. data >>= 1 # Shift out unwanted ACK/NACK bit here. o += "%d\t\t%s: " % (samplenum, state) ack = (sda == 1) and 'NACK' or 'ACK' d = (state == 'ADDRESS') and (data & 0xfe) or data wr = '' if state == 'ADDRESS': wr = (data & 1) and ' (W)' or ' (R)' state = 'DATA' o += "0x%02x%s (%s)\n" % (d, wr, ack) bitcount = data = 0 # STOP condition (P): SDA = rising, SCL = high elif (oldsda == 0 and sda == 1) and scl == 1: o += "%d\t\tSTOP\n" % samplenum state = 'IDLE' # Save current SDA/SCL values for the next round. oldscl = scl oldsda = sda return o
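The rem/add pair above (state = 'IDLE' becoming integer constants) swaps string states for enum-like integers built with range(). A minimal sketch of the pattern on its own, outside the decoder:

# Enum-like integer constants: comparisons are cheap, and a typo such as
# ADRESS fails loudly as a NameError instead of silently never matching.
IDLE, START, ADDRESS, DATA = range(4)

state = IDLE
if state == IDLE:
    state = ADDRESS
assert state == 2   # ADDRESS is the third constant, so its value is 2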
state = 'ADDRESS'
state = ADDRESS
def sigrokdecode_i2c(inbuf): """I2C protocol decoder""" # FIXME: This should be passed in as metadata, not hardcoded here. signals = (2, 5) channels = 8 o = wr = ack = d = '' bitcount = data = 0 state = 'IDLE' # Get the bit number (and thus probe index) of the SCL/SDA signals. scl_bit, sda_bit = signals # Get SCL/SDA bit values (0/1 for low/high) of the first sample. s = ord(inbuf[0]) oldscl = (s & (1 << scl_bit)) >> scl_bit oldsda = (s & (1 << sda_bit)) >> sda_bit # Loop over all samples. # TODO: Handle LAs with more/less than 8 channels. for samplenum, s in enumerate(inbuf[1:]): # We skip the first byte... s = ord(s) # FIXME # Get SCL/SDA bit values (0/1 for low/high). scl = (s & (1 << scl_bit)) >> scl_bit sda = (s & (1 << sda_bit)) >> sda_bit # TODO: Wait until the bus is idle (SDA = SCL = 1) first? # START condition (S): SDA = falling, SCL = high if (oldsda == 1 and sda == 0) and scl == 1: o += "%d\t\tSTART\n" % samplenum state = 'ADDRESS' bitcount = data = 0 # Data latching by transmitter: SCL = low elif (scl == 0): pass # TODO # Data sampling of receiver: SCL = rising elif (oldscl == 0 and scl == 1): bitcount += 1 # o += "%d\t\tRECEIVED BIT %d: %d\n" % \ # (samplenum, 8 - bitcount, sda) # Address and data are transmitted MSB-first. data <<= 1 data |= sda if bitcount != 9: continue # We received 8 address/data bits and the ACK/NACK bit. data >>= 1 # Shift out unwanted ACK/NACK bit here. o += "%d\t\t%s: " % (samplenum, state) ack = (sda == 1) and 'NACK' or 'ACK' d = (state == 'ADDRESS') and (data & 0xfe) or data wr = '' if state == 'ADDRESS': wr = (data & 1) and ' (W)' or ' (R)' state = 'DATA' o += "0x%02x%s (%s)\n" % (d, wr, ack) bitcount = data = 0 # STOP condition (P): SDA = rising, SCL = high elif (oldsda == 0 and sda == 1) and scl == 1: o += "%d\t\tSTOP\n" % samplenum state = 'IDLE' # Save current SDA/SCL values for the next round. oldscl = scl oldsda = sda return o
o += "%d\t\t%s: " % (samplenum, state)
o += "%d\t\tTODO:STATE: " % samplenum
def sigrokdecode_i2c(inbuf): """I2C protocol decoder""" # FIXME: This should be passed in as metadata, not hardcoded here. signals = (2, 5) channels = 8 o = wr = ack = d = '' bitcount = data = 0 state = 'IDLE' # Get the bit number (and thus probe index) of the SCL/SDA signals. scl_bit, sda_bit = signals # Get SCL/SDA bit values (0/1 for low/high) of the first sample. s = ord(inbuf[0]) oldscl = (s & (1 << scl_bit)) >> scl_bit oldsda = (s & (1 << sda_bit)) >> sda_bit # Loop over all samples. # TODO: Handle LAs with more/less than 8 channels. for samplenum, s in enumerate(inbuf[1:]): # We skip the first byte... s = ord(s) # FIXME # Get SCL/SDA bit values (0/1 for low/high). scl = (s & (1 << scl_bit)) >> scl_bit sda = (s & (1 << sda_bit)) >> sda_bit # TODO: Wait until the bus is idle (SDA = SCL = 1) first? # START condition (S): SDA = falling, SCL = high if (oldsda == 1 and sda == 0) and scl == 1: o += "%d\t\tSTART\n" % samplenum state = 'ADDRESS' bitcount = data = 0 # Data latching by transmitter: SCL = low elif (scl == 0): pass # TODO # Data sampling of receiver: SCL = rising elif (oldscl == 0 and scl == 1): bitcount += 1 # o += "%d\t\tRECEIVED BIT %d: %d\n" % \ # (samplenum, 8 - bitcount, sda) # Address and data are transmitted MSB-first. data <<= 1 data |= sda if bitcount != 9: continue # We received 8 address/data bits and the ACK/NACK bit. data >>= 1 # Shift out unwanted ACK/NACK bit here. o += "%d\t\t%s: " % (samplenum, state) ack = (sda == 1) and 'NACK' or 'ACK' d = (state == 'ADDRESS') and (data & 0xfe) or data wr = '' if state == 'ADDRESS': wr = (data & 1) and ' (W)' or ' (R)' state = 'DATA' o += "0x%02x%s (%s)\n" % (d, wr, ack) bitcount = data = 0 # STOP condition (P): SDA = rising, SCL = high elif (oldsda == 0 and sda == 1) and scl == 1: o += "%d\t\tSTOP\n" % samplenum state = 'IDLE' # Save current SDA/SCL values for the next round. oldscl = scl oldsda = sda return o
d = (state == 'ADDRESS') and (data & 0xfe) or data
d = (state == ADDRESS) and (data & 0xfe) or data
def sigrokdecode_i2c(inbuf): """I2C protocol decoder""" # FIXME: This should be passed in as metadata, not hardcoded here. signals = (2, 5) channels = 8 o = wr = ack = d = '' bitcount = data = 0 state = 'IDLE' # Get the bit number (and thus probe index) of the SCL/SDA signals. scl_bit, sda_bit = signals # Get SCL/SDA bit values (0/1 for low/high) of the first sample. s = ord(inbuf[0]) oldscl = (s & (1 << scl_bit)) >> scl_bit oldsda = (s & (1 << sda_bit)) >> sda_bit # Loop over all samples. # TODO: Handle LAs with more/less than 8 channels. for samplenum, s in enumerate(inbuf[1:]): # We skip the first byte... s = ord(s) # FIXME # Get SCL/SDA bit values (0/1 for low/high). scl = (s & (1 << scl_bit)) >> scl_bit sda = (s & (1 << sda_bit)) >> sda_bit # TODO: Wait until the bus is idle (SDA = SCL = 1) first? # START condition (S): SDA = falling, SCL = high if (oldsda == 1 and sda == 0) and scl == 1: o += "%d\t\tSTART\n" % samplenum state = 'ADDRESS' bitcount = data = 0 # Data latching by transmitter: SCL = low elif (scl == 0): pass # TODO # Data sampling of receiver: SCL = rising elif (oldscl == 0 and scl == 1): bitcount += 1 # o += "%d\t\tRECEIVED BIT %d: %d\n" % \ # (samplenum, 8 - bitcount, sda) # Address and data are transmitted MSB-first. data <<= 1 data |= sda if bitcount != 9: continue # We received 8 address/data bits and the ACK/NACK bit. data >>= 1 # Shift out unwanted ACK/NACK bit here. o += "%d\t\t%s: " % (samplenum, state) ack = (sda == 1) and 'NACK' or 'ACK' d = (state == 'ADDRESS') and (data & 0xfe) or data wr = '' if state == 'ADDRESS': wr = (data & 1) and ' (W)' or ' (R)' state = 'DATA' o += "0x%02x%s (%s)\n" % (d, wr, ack) bitcount = data = 0 # STOP condition (P): SDA = rising, SCL = high elif (oldsda == 0 and sda == 1) and scl == 1: o += "%d\t\tSTOP\n" % samplenum state = 'IDLE' # Save current SDA/SCL values for the next round. oldscl = scl oldsda = sda return o
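One detail worth isolating from the decoder context above is how a byte is assembled MSB-first with data <<= 1; data |= sda, and how the trailing ACK/NACK bit is stripped again with data >>= 1. A minimal sketch with hardcoded sample bits (not taken from any real capture):

bits = [1, 0, 1, 0, 0, 0, 1, 0, 0]   # 8 data bits MSB-first, then ACK (0)
data = 0
for bit in bits:
    data = (data << 1) | bit          # shift left, append the newest bit
ack_is_nack = bool(data & 1)          # last sampled bit is ACK (0) / NACK (1)
data >>= 1                            # drop it to recover the byte
assert data == 0xa2 and not ack_is_nack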