rem
stringlengths 0
322k
| add
stringlengths 0
2.05M
| context
stringlengths 8
228k
|
---|---|---|
_thread_tree = {} _Thread = threading.Thread class TrackableThread(_Thread): """ Monkey patch thread class that records the parent thread in the thread tree E.g. the thread that called this thread object's start() method
|
class TrackedThread(_Thread): """
|
def __exit__(self, *args, **kwargs): self.release()
|
super(TrackableThread, self).start()
|
return super(TrackedThread, self).start()
|
def start(self): parent = threading.current_thread() _thread_tree.setdefault(parent, []).append(self) super(TrackableThread, self).start()
|
threading.Thread = TrackableThread
|
threading.Thread = TrackedThread
|
def start(self): parent = threading.current_thread() _thread_tree.setdefault(parent, []).append(self) super(TrackableThread, self).start()
|
@type thread: TrackableThread instance
|
@type thread: L{TrackedThread} instance
|
def get_descendants(thread): """ Get a list of all the descendant threads for the given thread. @type thread: TrackableThread instance @param thread: thread to find the descendants of @raise RuntimeError: if the thread is not an instance of TrackableThread @return: list of TrackableThread instances """ if not isinstance(thread, TrackableThread): raise RuntimeError('Cannot find descendants of an untrackable thread') descendants = _thread_tree.get(thread, []) for d in descendants: descendants.extend(_thread_tree.get(d, [])) return descendants
|
@raise RuntimeError: if the thread is not an instance of TrackableThread @return: list of TrackableThread instances """ if not isinstance(thread, TrackableThread): raise RuntimeError('Cannot find descendants of an untrackable thread')
|
@raise RuntimeError: if the thread is not an instance of TrackedThread @return: list of TrackedThread instances """ if not isinstance(thread, TrackedThread): raise RuntimeError('Cannot find descendants of an untracked thread')
|
def get_descendants(thread): """ Get a list of all the descendant threads for the given thread. @type thread: TrackableThread instance @param thread: thread to find the descendants of @raise RuntimeError: if the thread is not an instance of TrackableThread @return: list of TrackableThread instances """ if not isinstance(thread, TrackableThread): raise RuntimeError('Cannot find descendants of an untrackable thread') descendants = _thread_tree.get(thread, []) for d in descendants: descendants.extend(_thread_tree.get(d, [])) return descendants
|
def remove_subtree(thread): if not isinstance(thread, TrackedThread): raise RuntimeError('Cannot clear subtree of an untracked thread') descendents = _thread_tree.pop(thread, []) for d in descendents: descendents.extend(_thread_tree.pop(d, [])) return len(descendents)
|
def get_descendants(thread): """ Get a list of all the descendant threads for the given thread. @type thread: TrackableThread instance @param thread: thread to find the descendants of @raise RuntimeError: if the thread is not an instance of TrackableThread @return: list of TrackableThread instances """ if not isinstance(thread, TrackableThread): raise RuntimeError('Cannot find descendants of an untrackable thread') descendants = _thread_tree.get(thread, []) for d in descendants: descendants.extend(_thread_tree.get(d, [])) return descendants
|
|
class InterruptableThread(TrackableThread): """ A thread class that supports raising exception in the thread from another thread. """
|
class TaskThreadException(Exception): """ Base class for task-specific exceptions to be raised in a task thread. """ pass class TimeoutException(TaskThreadException): """ Exception to interrupt a task with a time out. """ pass class CancelException(TaskThreadException): """ Exception to interrupt a task with a cancellation. """ pass class TaskThread(TrackedThread): """ Derived task thread class that allows for task-specific interruptions. """
|
def _raise_exception_in_thread(tid, exc_type): """ Raises an exception in the threads with id tid. """ assert inspect.isclass(exc_type) # NOTE this returns the number of threads that it modified, which should # only be 1 or 0 (if the thread id wasn't found) long_tid = ctypes.c_long(tid) exc_ptr = ctypes.py_object(exc_type) num = ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, exc_ptr) if num == 1: return if num == 0: raise ValueError('Invalid thread id') # NOTE if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect null_ptr = ctypes.py_object() ctypes.pythonapi.PyThreadState_SetAsyncExc(long_tid, null_ptr) raise SystemError('PyThreadState_SetAsyncExc failed')
|
super(InterruptableThread, self).__init__(*args, **kwargs)
|
super(TaskThread, self).__init__(*args, **kwargs)
|
def __init__(self, *args, **kwargs): super(InterruptableThread, self).__init__(*args, **kwargs) self.__default_timeout = 0.05 self.__exception_event = threading.Event()
|
@property def _tid(self): """ Determine this thread's id. """ if not self.is_alive(): raise threading.ThreadError('Thread is not active') if hasattr(self, '_thread_id'): return self._thread_id for tid, tobj in threading._active.items(): if tobj is self: self._thread_id = tid return tid raise AssertionError('Could not determine thread id') def exception_event(self):
|
def exception_delivered(self):
|
def __init__(self, *args, **kwargs): super(InterruptableThread, self).__init__(*args, **kwargs) self.__default_timeout = 0.05 self.__exception_event = threading.Event()
|
_raise_exception_in_thread(self._tid, exc_type)
|
_raise_exception_in_thread(_tid(self), exc_type)
|
def raise_exception(self, exc_type): """ Raise and exception in this thread. NOTE this is executed in the context of the calling thread and blocks until the exception has been delivered to this thread and this thread exists. """ while not self.__exception_event.is_set(): try: _raise_exception_in_thread(self._tid, exc_type) self.__exception_event.wait(self.__default_timeout) except (threading.ThreadError, AssertionError, ValueError, SystemError), e: _log.error('Failed to deliver exception %s to thread[%s]: %s' % (exc_type.__name__, str(self.ident), e.message)) break
|
class TaskThreadException(Exception): """ Base class for task-specific exceptions to be raised in a task thread. """ pass class TimeoutException(TaskThreadException): """ Exception to interrupt a task with a time out. """ pass class CancelException(TaskThreadException): """ Exception to interrupt a task with a cancellation. """ pass class TaskThread(InterruptableThread): """ Derived task thread class that allows for task-specific interruptions. """
|
def raise_exception(self, exc_type): """ Raise and exception in this thread. NOTE this is executed in the context of the calling thread and blocks until the exception has been delivered to this thread and this thread exists. """ while not self.__exception_event.is_set(): try: _raise_exception_in_thread(self._tid, exc_type) self.__exception_event.wait(self.__default_timeout) except (threading.ThreadError, AssertionError, ValueError, SystemError), e: _log.error('Failed to deliver exception %s to thread[%s]: %s' % (exc_type.__name__, str(self.ident), e.message)) break
|
|
self.user_api = UserApi() self.consumer_api = ConsumerApi()
|
def __init__(self, *dec_args, **dec_kw): '''The decorator arguments are passed here. Save them for runtime.''' self.dec_args = dec_args self.dec_kw = dec_kw
|
|
user = USER_API.user(username)
|
user = self.user_api.user(username)
|
def _validate_user_exists(self, username): user = USER_API.user(username) if user is None: LOG.error('User [%s] specified in certificate was not found in the system' % username) return None return user
|
consumer = CONSUMER_API.consumer(consumer_cert_uid)
|
consumer = self.consumer_api.consumer(consumer_cert_uid)
|
def check_consumer(self, check_id=False, *fargs): ''' Determines if the certificate in the request represents a valid consumer certificate.
|
if reboot_status.has_key('reboot_performed') and reboot_status['reboot_performed']:
|
if not reboot_status: print _('\nSuccessfully installed [%s] on [%s]') % \ (installed, (consumerid or (consumergroupid))) elif reboot_status.has_key('reboot_performed') and reboot_status['reboot_performed']:
|
def run(self): errataids = self.args
|
(installed, (consumerid or (consumergroupid)))) else: print _('\nSuccessfully installed [%s] on [%s]') % \ (installed, (consumerid or (consumergroupid)))
|
(installed, (consumerid or (consumergroupid))))
|
def run(self): errataids = self.args
|
repo['relativepath'], p["filename"])
|
repo['relative_path'], p["filename"])
|
def remove_package(self, repoid, p): """Note: This method does not update repo metadata. It is assumed metadata has already been updated. """ repo = self._get_existing_repo(repoid) # this won't fail even if the package is not in the repo's packages repo['packages'].pop(p['id'], None) self.objectdb.save(repo, safe=True) # Remove package from repo location on file system pkg_repo_path = pulp.server.util.get_repo_package_path( repo['relativepath'], p["filename"]) if os.path.exists(pkg_repo_path): log.debug("Delete package %s at %s" % (p, pkg_repo_path)) os.remove(pkg_repo_path)
|
log.error("Error: %s" % re) systemExit(re.code, re.msg)
|
print _(" Adding consumer failed ") log.error("Error: %s" % re) sys.exit(-1)
|
def _add_consumer(self): if not self.options.consumerid: print("consumer id required. Try --help") sys.exit(0) if not self.options.groupid: print("group id required. Try --help") sys.exit(0) try: self.cgconn.add_consumer(self.options.groupid, self.options.consumerid) print _(" Successfully added Consumer [%s] to Group [%s]" % (self.options.consumerid, self.options.groupid)) except RestlibException, re: log.error("Error: %s" % re) systemExit(re.code, re.msg) except Exception, e: log.error("Error: %s" % e) raise
|
repo.packages[package.id] = package
|
repo.packages[package["packageid"]] = package
|
def test_repo_packages(self): repo = self.rapi.create('some-id','some name', \ 'i386', 'yum:http://example.com') package = Package('test_repo_packages','test package') repo.packages[package.id] = package self.rapi.update(repo) found = self.rapi.repository('some-id') packages = found['packages'] assert(packages != None) assert(packages['test_repo_packages'] != None)
|
pkggroup.default_package_names.append(package.id) repo.packagegroups[pkggroup.groupid] = pkggroup repo.packages[package.id] = package
|
pkggroup.default_package_names.append(package["packageid"]) repo.packagegroups[pkggroup["groupid"]] = pkggroup repo.packages[package["packageid"]] = package
|
def test_repo_package_groups(self): repo = self.rapi.create('some-id','some name', \ 'i386', 'yum:http://example.com') pkggroup = PackageGroup('test-group-id', 'test-group-name', 'test-group-description') package = Package('test_repo_packages','test package') pkggroup.default_package_names.append(package.id) repo.packagegroups[pkggroup.groupid] = pkggroup repo.packages[package.id] = package self.rapi.update(repo) found = self.rapi.repository('some-id') packages = found['packages'] assert(packages != None) assert(packages['test_repo_packages'] != None) assert(found['packagegroups'] != None) print "test_repo_package_groups found['packagegroups'] = %s" % (found['packagegroups']) assert(pkggroup.groupid in found['packagegroups'])
|
pkggroup.default_package_names.append(package.id)
|
pkggroup.default_package_names.append(package["packageid"])
|
def test_repo_package_group_categories(self): repo = self.rapi.create('some-id','some name', \ 'i386', 'yum:http://example.com') package = Package('test_repo_packages','test package') pkggroup = PackageGroup('test-group-id', 'test-group-name', 'test-group-description') pkggroup.default_package_names.append(package.id) ctg = PackageGroupCategory('test-group-cat-id', 'test-group-cat-name', 'test-group-cat-description') ctg.packagegroupids = pkggroup.id repo.packagegroupcategories[ctg.categoryid] = ctg repo.packagegroups[pkggroup.groupid] = pkggroup repo.packages[package.id] = package self.rapi.update(repo) found = self.rapi.repository('some-id') packages = found['packages'] assert(packages != None) assert(packages['test_repo_packages'] != None) assert(found['packagegroups'] != None) print "test_repo_package_groups found['packagegroups'] = %s" % (found['packagegroups']) assert(pkggroup.groupid in found['packagegroups']) assert(found['packagegroupcategories'] != None) assert(ctg.categoryid in found['packagegroupcategories'])
|
repo.packages[package.id] = package
|
repo.packages[package["packageid"]] = package
|
def test_repo_package_group_categories(self): repo = self.rapi.create('some-id','some name', \ 'i386', 'yum:http://example.com') package = Package('test_repo_packages','test package') pkggroup = PackageGroup('test-group-id', 'test-group-name', 'test-group-description') pkggroup.default_package_names.append(package.id) ctg = PackageGroupCategory('test-group-cat-id', 'test-group-cat-name', 'test-group-cat-description') ctg.packagegroupids = pkggroup.id repo.packagegroupcategories[ctg.categoryid] = ctg repo.packagegroups[pkggroup.groupid] = pkggroup repo.packages[package.id] = package self.rapi.update(repo) found = self.rapi.repository('some-id') packages = found['packages'] assert(packages != None) assert(packages['test_repo_packages'] != None) assert(found['packagegroups'] != None) print "test_repo_package_groups found['packagegroups'] = %s" % (found['packagegroups']) assert(pkggroup.groupid in found['packagegroups']) assert(found['packagegroupcategories'] != None) assert(ctg.categoryid in found['packagegroupcategories'])
|
c.packageids.append(package.id)
|
c.packageids.append(package["packageid"])
|
def test_consumerwithpackage(self): c = self.capi.create('test-consumer', 'some consumer desc') package = Package('test_consumerwithpackage','test package search') c.packageids.append(package.id) for i in range(10): package = Package(randomString(), randomString()) c.packageids.append(package.id) self.capi.update(c) found = self.capi.consumerswithpackage('some-invalid-id') assert(len(found) == 0)
|
descendants.extend(_thread_tree.get(d, []))
|
descendants.extend(_thread_tree.get(d(), []))
|
def get_descendants(thread): """ Get a list of all the descendant threads for the given thread. @type thread: L{TrackedThread} instance @param thread: thread to find the descendants of @raise RuntimeError: if the thread is not an instance of TrackedThread @return: list of TrackedThread instances """ if not isinstance(thread, TrackedThread): raise RuntimeError('Cannot find descendants of an untracked thread') descendants = _thread_tree.get(thread, []) for d in descendants: descendants.extend(_thread_tree.get(d, [])) return [d() for d in descendants if d() is not None]
|
address = '127.0.0.1' port = 8811
|
address = config.get('server', 'address') port = config.getint('server', 'port')
|
def _create_socket(config): #address = config.get('server', 'address') #port = config.getint('server', 'port') address = '127.0.0.1' port = 8811 socket = gevent.socket.tcp_listener((address, port)) #if config.getbool('server', 'use_ssl'): if False: keyfile = config.get('ssl', 'keyfile') certfile = config.get('ssl', 'certfile') socket = gevent.ssl.SSLSocket(socket, keyfile, certfile) return socket
|
if False:
|
if config.getboolean('server', 'use_ssl'):
|
def _create_socket(config): #address = config.get('server', 'address') #port = config.getint('server', 'port') address = '127.0.0.1' port = 8811 socket = gevent.socket.tcp_listener((address, port)) #if config.getbool('server', 'use_ssl'): if False: keyfile = config.get('ssl', 'keyfile') certfile = config.get('ssl', 'certfile') socket = gevent.ssl.SSLSocket(socket, keyfile, certfile) return socket
|
if not cid.exists():
|
if not cid:
|
def perform(self): """ Looks up the consumer id and latest pkg profile info and cals the api to update the consumer profile """ bundle = Consumer() cid = bundle.getid() if not cid.exists(): log.error("Not Registered") return try: cconn = ConsumerConnection(host=cfg.server.host or "localhost", port=cfg.server.port or 443) pkginfo = PackageProfile().getPackageList() cconn.profile(cid.read(), pkginfo) log.info("Profile updated successfully for consumer %s" % cid.read()) except RestlibException, re: log.error("Error: %s" % re) except Exception, e: log.error("Error: %s" % e)
|
cconn.profile(cid.read(), pkginfo) log.info("Profile updated successfully for consumer %s" % cid.read())
|
cconn.profile(cid, pkginfo) log.info("Profile updated successfully for consumer %s" % cid)
|
def perform(self): """ Looks up the consumer id and latest pkg profile info and cals the api to update the consumer profile """ bundle = Consumer() cid = bundle.getid() if not cid.exists(): log.error("Not Registered") return try: cconn = ConsumerConnection(host=cfg.server.host or "localhost", port=cfg.server.port or 443) pkginfo = PackageProfile().getPackageList() cconn.profile(cid.read(), pkginfo) log.info("Profile updated successfully for consumer %s" % cid.read()) except RestlibException, re: log.error("Error: %s" % re) except Exception, e: log.error("Error: %s" % e)
|
task2 = self.queue.find(id=task1.id)
|
task2 = self.queue.find(id=task1.id)[0]
|
def test_task_find(self): task1 = Task(noop) self.queue.enqueue(task1) task2 = self.queue.find(id=task1.id) self.assertTrue(task1 is task2)
|
self.assertTrue(found is None)
|
self.assertTrue(not found)
|
def test_find_invalid_criteria(self): # Setup task1 = Task(noop) self.queue.enqueue(task1)
|
self.assertTrue(found is None)
|
self.assertTrue(not found)
|
def test_find_empty_queue(self): # Test found = self.queue.find(id=1)
|
self.assertTrue(found is task1)
|
self.assertTrue(found[0] is task1)
|
def test_find_multiple_criteria(self): # Setup task1 = Task(noop) self.queue.enqueue(task1)
|
self.assertTrue(found is task2)
|
self.assertTrue(found[0] is task2)
|
def test_find_multiple_matching(self): # Setup task1 = Task(noop) task2 = Task(noop)
|
self.assertTrue(status.state == task.state)
|
self.assertTrue(status[0].state == task.state)
|
def test_task_status(self): task = Task(noop) self.queue.enqueue(task) self._wait_for_task(task) status = self.queue.find(id=task.id) self.assertTrue(status.state == task.state)
|
self.rapi.add_packages_to_group(repo["id"], pkg_group["id"], ["test_package_name"])
|
self.rapi.add_packages_to_group(repo["id"], pkg_group["id"], ["pulp-test-package"])
|
def test_create_groups_metadata(self): repo_path = os.path.join(self.data_path, "no_groups_repo") repo = self.rapi.create("test_create_groups_metadata_id", 'test_import_groups_data_id', 'i386', 'local:file://%s' % (repo_path)) self.rapi._sync(repo["id"]) found = self.rapi.packagegroups(repo['id']) self.assertTrue(len(found) == 0) self.assertTrue(repo["group_xml_path"] == "") self.assertTrue(repo["group_gz_xml_path"] == "") pkg_group = self.rapi.create_packagegroup(repo["id"], "test_group", "test_group_name", "test description") self.rapi.add_packages_to_group(repo["id"], pkg_group["id"], ["test_package_name"]) # Update repo object so we can test that group_xml_path was set repo = self.rapi.repository(repo["id"]) self.assertTrue(repo["group_xml_path"] != "") comps = yum.comps.Comps() comps.add(repo["group_xml_path"]) groups = comps.get_groups() self.assertTrue(len(groups) == 1) self.assertTrue(groups[0].groupid == pkg_group["id"]) self.assertTrue(groups[0].name == pkg_group["name"]) self.assertTrue("test_package_name" in groups[0].default_packages) self.assertTrue("test_package_name" not in groups[0].mandatory_packages)
|
self.assertTrue("test_package_name" in groups[0].default_packages) self.assertTrue("test_package_name" not in groups[0].mandatory_packages)
|
self.assertTrue("pulp-test-package" in groups[0].default_packages) self.assertTrue("pulp-test-package" not in groups[0].mandatory_packages)
|
def test_create_groups_metadata(self): repo_path = os.path.join(self.data_path, "no_groups_repo") repo = self.rapi.create("test_create_groups_metadata_id", 'test_import_groups_data_id', 'i386', 'local:file://%s' % (repo_path)) self.rapi._sync(repo["id"]) found = self.rapi.packagegroups(repo['id']) self.assertTrue(len(found) == 0) self.assertTrue(repo["group_xml_path"] == "") self.assertTrue(repo["group_gz_xml_path"] == "") pkg_group = self.rapi.create_packagegroup(repo["id"], "test_group", "test_group_name", "test description") self.rapi.add_packages_to_group(repo["id"], pkg_group["id"], ["test_package_name"]) # Update repo object so we can test that group_xml_path was set repo = self.rapi.repository(repo["id"]) self.assertTrue(repo["group_xml_path"] != "") comps = yum.comps.Comps() comps.add(repo["group_xml_path"]) groups = comps.get_groups() self.assertTrue(len(groups) == 1) self.assertTrue(groups[0].groupid == pkg_group["id"]) self.assertTrue(groups[0].name == pkg_group["name"]) self.assertTrue("test_package_name" in groups[0].default_packages) self.assertTrue("test_package_name" not in groups[0].mandatory_packages)
|
API.bind(id, data['repoid'])
|
API.bind(id, data)
|
def POST(self, id): """ Bind (subscribe) a user to a repository. @param id: consumer id @return: True on successful bind """ data = self.input() API.bind(id, data['repoid']) return self.output(True)
|
API.unbind(id, data['repoid'])
|
API.unbind(id, data)
|
def POST(self, id): """ Unbind (unsubscribe) a user to a repository. @param id: consumer id @return: True on successful unbind """ data = self.input() API.unbind(id, data['repoid']) return self.output(True)
|
addids = [] rmids = []
|
def _update_errata_packages(self, repoid, errataids=[], action=None): repo = self._get_existing_repo(repoid) for erratumid in errataids: erratum = self.errataapi.erratum(erratumid) if erratum is None: log.info("No Erratum with id: %s found" % erratumid) continue addids = [] rmids = [] for pkg in erratum['pkglist']: for pinfo in pkg['packages']: epkg = self.packageapi.package_by_ivera(pinfo['name'], pinfo['version'], pinfo['epoch'], pinfo['release'], pinfo['arch']) if epkg: addids.append(epkg['id']) rmids.append(epkg) if action == 'add': self.add_package(repo['id'], addids) elif action == 'delete': self.remove_packages(repo['id'], rmids)
|
|
description = _('Remove specific package(s) from the source repository.')
|
description = _('Remove package(s) from the repository.')
|
def run(self): id = self.get_required_option('id') if not self.opts.pkgname: system_exit(os.EX_USAGE, _("Error, atleast one package id is required to perform an add.")) if not self.opts.srcrepo: system_exit(os.EX_USAGE, _("Error, a source respository where packages exists is required")) pids = [] for pkg in self.opts.pkgname: pinfo = self.pconn.get_package_by_filename(self.opts.srcrepo, pkg) pids.append(pinfo['id']) try: if pinfo: self.pconn.add_package(id, pids) else: print _("Package [%s] is not part of the source repository [%s]" % (pkg, self.opts.srcrepo)) except Exception: raise print _("Unable to add package [%s] to repo [%s]" % (pkg, id)) print _("Successfully added packages %s to repo [%s]." %(self.opts.pkgname, id))
|
description = _('Remove specific errata from the source repository')
|
description = _('Remove errata from the repository')
|
def run(self): id = self.get_required_option('id') if not self.opts.errataid: system_exit(os.EX_USAGE, _("Error, atleast one erratum id is required to perform an add.")) if not self.opts.srcrepo: system_exit(os.EX_USAGE, _("Error, a source respository where erratum exists is required")) errataids = self.opts.errataid try: self.pconn.add_errata(id, errataids) except Exception: system_exit(os.EX_DATAERR, _("Unable to add errata [%s] to repo [%s]" % (errataids, id))) print _("Successfully added Errata %s to repo [%s]." %(errataids, id))
|
print _("Successfully removed Errata %s to repo [%s]." %(errataids, id))
|
print _("Successfully removed Errata %s from repo [%s]." %(errataids, id))
|
def run(self): id = self.get_required_option('id') if not self.opts.errataid: system_exit(os.EX_USAGE, _("Error, atleast one erratum id is required to perform a delete.")) errataids = self.opts.errataid try: self.pconn.delete_errata(id, errataids) except Exception: print _("Unable to remove errata [%s] to repo [%s]" % (errataids, id)) print _("Successfully removed Errata %s to repo [%s]." %(errataids, id))
|
print_header(_('Permissions for %') % resource)
|
print_header(_('Permissions for %s') % resource)
|
def run(self): resource = self.get_required_option('resource') perms = self.perm_conn.show_permissions(resource) if perms is None: system_exit(os.EX_SOFTWARE) print_header(_('Permissions for %') % resource) for user, operations in perms['users'].items(): print ' %s \t%-25s' % (user, ', '.join(operations))
|
assert (found_a["packages"].has_key(test_pkg_name)) assert (found_b["packages"].has_key(test_pkg_name)) assert (len(found_a["packages"][test_pkg_name]) == 1) assert (len(found_b["packages"][test_pkg_name]) == 1) pkgVerA = found_a["packages"][test_pkg_name][0] pkgVerB = found_b["packages"][test_pkg_name][0]
|
found_a_pid = None for pid in found_a["packages"].keys(): if (pid.index(test_pkg_name) >= 0): found_a_pid = pid assert(found_a_pid != None) found_b_pid = None for pid in found_b["packages"].keys(): if (pid.index(test_pkg_name) >= 0): found_b_pid = pid assert(found_b_pid != None) packagea = found_a["packages"][found_a_pid] packageb = found_b["packages"][found_b_pid]
|
def test_sync_two_repos_share_common_package(self): """ Sync 2 repos that share a common package, same NEVRA same checksum """ test_pkg_name = "pulp-test-package" my_dir = os.path.abspath(os.path.dirname(__file__)) repo_name_a = "test_two_repos_share_common_pkg_repo_A" repo_name_b = "test_two_repos_share_common_pkg_repo_B" datadir_a = my_dir + "/data/sameNEVRA_sameChecksums/A/repo/" datadir_b = my_dir + "/data/sameNEVRA_sameChecksums/B/repo/" # Create & Sync Repos repo_a = self.rapi.create(repo_name_a,'some name', 'x86_64', 'local:file://%s' % datadir_a) repo_b = self.rapi.create(repo_name_b,'some name', 'x86_64', 'local:file://%s' % datadir_b) self.rapi.sync(repo_a['id']) self.rapi.sync(repo_b['id']) # Look up each repo from API found_a = self.rapi.repository(repo_a['id']) found_b = self.rapi.repository(repo_b['id']) # Verify each repo has the test package synced assert (found_a["packages"].has_key(test_pkg_name)) assert (found_b["packages"].has_key(test_pkg_name)) # Grab the associated package version (there should only be 1) # Ensure that the package versions have different md5sums, but all other # keys are identical
|
assert(pkgVerA['_id'] == pkgVerB['_id'])
|
assert(packagea['_id'] == packageb['_id'])
|
def test_sync_two_repos_share_common_package(self): """ Sync 2 repos that share a common package, same NEVRA same checksum """ test_pkg_name = "pulp-test-package" my_dir = os.path.abspath(os.path.dirname(__file__)) repo_name_a = "test_two_repos_share_common_pkg_repo_A" repo_name_b = "test_two_repos_share_common_pkg_repo_B" datadir_a = my_dir + "/data/sameNEVRA_sameChecksums/A/repo/" datadir_b = my_dir + "/data/sameNEVRA_sameChecksums/B/repo/" # Create & Sync Repos repo_a = self.rapi.create(repo_name_a,'some name', 'x86_64', 'local:file://%s' % datadir_a) repo_b = self.rapi.create(repo_name_b,'some name', 'x86_64', 'local:file://%s' % datadir_b) self.rapi.sync(repo_a['id']) self.rapi.sync(repo_b['id']) # Look up each repo from API found_a = self.rapi.repository(repo_a['id']) found_b = self.rapi.repository(repo_b['id']) # Verify each repo has the test package synced assert (found_a["packages"].has_key(test_pkg_name)) assert (found_b["packages"].has_key(test_pkg_name)) # Grab the associated package version (there should only be 1) # Ensure that the package versions have different md5sums, but all other # keys are identical
|
handler = self.apihandler + method
|
handler = method if not handler.startswith(self.apihandler): handler = '/'.join((self.apihandler, handler))
|
def _request(self, request_type, method, info=None): handler = self.apihandler + method log.debug("_request calling: %s to host:port : %s:%s" % (handler, self.host, self.port)) if self.cert_file: log.info("Using SSLv3 context") context = SSL.Context("sslv3") context.load_cert(self.cert_file, keyfile=self.key_file) conn = httpslib.HTTPSConnection(self.host, self.port, ssl_context=context) else: conn = httplib.HTTPSConnection(self.host, self.port) log.debug("Request_type: %s" % request_type) log.debug("info: %s" % info) log.debug("headers: %s" % self.headers) conn.request(request_type, handler, body=json.dumps(info), headers=self.headers) response = conn.getresponse() if response.status == 404: return None self.validateResponse(response) rinfo = response.read() if not len(rinfo): return None return json.loads(rinfo)
|
try: assumeyes = cfg.client.assumeyes except:
|
cfg_assumeyes = cfg.client.assumeyes if cfg_assumeyes in ["True", "False"]: assumeyes = eval(cfg_assumeyes) else:
|
def install(self, packageinfo, reboot_suggested=False, assumeyes=False): """ Install packages by name. @param packageinfo: A list of strings for pkg names or tuples for name/arch info. @type packageinfo: str or tuple """ installed = [] yb = YumBase() log.info('installing packages: %s', packageinfo) for info in packageinfo: if isinstance(info, list): pkgs = yb.pkgSack.returnNewestByNameArch(tuple(info)) else: pkgs = yb.pkgSack.returnNewestByName(info) for p in pkgs: installed.append(str(p)) yb.tsInfo.addInstall(p) yb.resolveDeps() yb.processTransaction() if reboot_suggested: try: assumeyes = cfg.client.assumeyes except: assumeyes = assumeyes if assumeyes: self.__schedule_reboot() return (installed, {'reboot_performed' :True}) else: return (installed, {'reboot_performed' :False}) return (installed, None)
|
if assumeyes:
|
if assumeyes is True:
|
def install(self, packageinfo, reboot_suggested=False, assumeyes=False): """ Install packages by name. @param packageinfo: A list of strings for pkg names or tuples for name/arch info. @type packageinfo: str or tuple """ installed = [] yb = YumBase() log.info('installing packages: %s', packageinfo) for info in packageinfo: if isinstance(info, list): pkgs = yb.pkgSack.returnNewestByNameArch(tuple(info)) else: pkgs = yb.pkgSack.returnNewestByName(info) for p in pkgs: installed.append(str(p)) yb.tsInfo.addInstall(p) yb.resolveDeps() yb.processTransaction() if reboot_suggested: try: assumeyes = cfg.client.assumeyes except: assumeyes = assumeyes if assumeyes: self.__schedule_reboot() return (installed, {'reboot_performed' :True}) else: return (installed, {'reboot_performed' :False}) return (installed, None)
|
pinfo['epoch'],
|
epoch,
|
def _update_errata_packages(self, repoid, errataids=[], action=None): repo = self._get_existing_repo(repoid) addids = [] rmids = [] for erratumid in errataids: erratum = self.errataapi.erratum(erratumid) if erratum is None: log.info("No Erratum with id: %s found" % erratumid) continue for pkg in erratum['pkglist']: for pinfo in pkg['packages']: epkg = self.packageapi.package_by_ivera(pinfo['name'], pinfo['version'], pinfo['epoch'], pinfo['release'], pinfo['arch']) if epkg: addids.append(epkg['id']) rmids.append(epkg) if action == 'add': self.add_package(repo['id'], addids) elif action == 'delete': self.remove_packages(repo['id'], rmids)
|
def __init__(self, host, port, apihandler, cert_file=None, key_file=None, username=None, password=None):
|
def __init__(self, host, port, apihandler, apiprefix='/pulp/api', cert_file=None, key_file=None, username=None, password=None):
|
def __init__(self, host, port, apihandler, cert_file=None, key_file=None,
             username=None, password=None):
    """
    Hold connection parameters and the pre-built HTTP headers used for
    every REST call.

    @param host: server hostname
    @param port: server port (any value acceptable to int())
    @param apihandler: base URL path for API requests
    @param cert_file: optional SSL certificate path
    @param key_file: optional SSL key path
    @param username: optional account name for HTTP basic auth
    @param password: optional account password for HTTP basic auth
    """
    self.host = host
    # httpslib insists on an integer port, so coerce whatever we were given
    self.port = int(port)
    self.apihandler = apihandler
    self.username = username
    self.password = password
    auth = None
    if self.username is not None:
        credentials = "%s:%s" % (self.username, self.password)
        # encodestring appends a trailing newline; strip it off
        auth = "Basic %s" % base64.encodestring(credentials)[:-1]
    language = locale.getdefaultlocale()[0].lower().replace('_', '-')
    self.headers = {
        "Content-type": "application/json",
        "Authorization": auth,
        "Accept": "application/json",
        "Accept-Language": language,
    }
    self.cert_file = cert_file
    self.key_file = key_file
|
self.apihandler = apihandler
|
self.apihandler = ''.join((apiprefix, apihandler))
|
def __init__(self, host, port, apihandler, cert_file=None, key_file=None, username=None, password=None): self.host = host # ensure we have an integer, httpslib is picky about the type # passed in for the port self.port = int(port) self.apihandler = apihandler self.username = username self.password = password if (self.username != None): raw = "%s:%s" % (self.username, self.password) base64string = base64.encodestring(raw)[:-1] auth = "Basic %s" % base64string else: auth = None self.headers = {"Content-type":"application/json", "Authorization": auth, "Accept": "application/json", "Accept-Language": locale.getdefaultlocale()[0].lower().replace('_', '-')} self.cert_file = cert_file self.key_file = key_file
|
def __init__(self, host='localhost', port=8811, handler="", cert_file=None, key_file=None,
|
def __init__(self, host='localhost', port=443, handler="", cert_file=None, key_file=None,
|
def __init__(self, host='localhost', port=8811, handler="", cert_file=None, key_file=None,
             username=None, password=None):
    """
    Store connection settings and eagerly open the connection.

    @param host: server hostname
    @param port: server port
    @param handler: base URL path prepended to request methods
    @param cert_file: optional SSL certificate path
    @param key_file: optional SSL key path
    @param username: optional account name for basic auth
    @param password: optional account password for basic auth
    """
    self.host = host
    self.port = port
    self.handler = handler
    self.conn = None
    self.cert_file = cert_file
    self.key_file = key_file
    self.username = username
    self.password = password
    # initialize connection
    self.setUp()
|
self.conn = Restlib(self.host, self.port, self.handler, self.cert_file, self.key_file, self.username, self.password) log.info("Connection Established for cli: Host: %s, Port: %s, handler: %s" %
|
self.conn = Restlib(self.host, self.port, self.handler, cert_file=self.cert_file, key_file=self.key_file, username=self.username, password=self.password) log.info("Connection Established for cli: Host: %s, Port: %s, handler: %s" %
|
def setUp(self):
    """
    (Re)build the underlying Restlib connection from the stored settings
    and log the connection parameters.
    """
    # NOTE(review): cert/key/user/password are passed positionally; they
    # must stay aligned with Restlib.__init__'s parameter order.
    self.conn = Restlib(self.host, self.port, self.handler, self.cert_file,
                        self.key_file, self.username, self.password)
    log.info("Connection Established for cli: Host: %s, Port: %s, handler: %s" %
             (self.host, self.port, self.handler))
    log.info("Using cert_file: %s and key_file: %s" %
             (self.cert_file, self.key_file))
|
dir = self.opts.dir if dir: files += utils.processDirectory(dir, "rpm")
|
def run(self):
    """
    Upload one or more rpm files (plus, optionally, every rpm under
    --dir) to the repository given by --id.
    """
    id = self.get_required_option('id')
    files = self.args
    if not files:
        system_exit(os.EX_USAGE, _("need to provide at least one file to perform upload"))
    dir = self.opts.dir
    if dir:
        files += utils.processDirectory(dir, "rpm")
    uploadinfo = {}
    uploadinfo['repo'] = id
    for frpm in files:
        try:
            pkginfo = utils.processRPM(frpm)
        except FileError, e:
            print >> sys.stderr, _('error: %s') % e
            continue
        # only rpms carry an 'nvrea' entry; skip anything else
        if not pkginfo.has_key('nvrea'):
            print _("Package %s is not an rpm; skipping") % frpm
            continue
        # the wire protocol carries the rpm payload base64-encoded
        pkgstream = base64.b64encode(open(frpm).read())
        status = self.pconn.upload(id, pkginfo, pkgstream)
        if status:
            print _(" successful uploaded [%s] to repo [ %s ]") % (pkginfo['pkgname'], id)
        else:
            print _(" failed to upload [%s] to repo [ %s ]") % (pkginfo['pkgname'], id)
|
|
ed = EventDispatcher() ed.start()
|
def main():
    """
    Smoke-test driver: start the event dispatcher, publish a handful of
    sample events, then give the dispatcher a moment to consume them.
    """
    ed = EventDispatcher()
    ed.start()
    p = EventProducer()
    for n in range(0, 1):
        d = dict(
            id='repo%d' % n,
            name='Repository%d' % n,
            arch='noarch',)
        p.send('bogus', 'bogus')
        #p.send('user', 'user without subject')
        #p.send('user.hello', 'user.%d' % n)
        #p.send('user.created', '{%d} user.created' % n)
        #p.send('user.updated', '{%d} user.updated' % n)
        #p.send('user.deleted', '{%d} user-deleted' % n)
        p.send('repo.created', d)
        #p.send('repo.updated', d)
        #p.send('repo.deleted', d)
        p.send('product.created', d)
    # let the dispatcher thread drain the events before returning
    sleep(3)
|
|
@audit('ConsumerApi', params=['id'])
|
@audit()
|
def create(self, id, description):
    """
    Create a new Consumer object and return it

    @param id: unique consumer id
    @param description: free-form consumer description
    @raise PulpException: if a consumer with the given id already exists
    @return: the newly persisted consumer model object
    """
    consumer = self.consumer(id)
    if(consumer):
        raise PulpException("A Consumer with id %s already exists" % id)
    c = model.Consumer(id, description)
    self.insert(c)
    return c
|
Credentials.setuser(self.opts.username, self.opts.password)
|
username = self.opts.username password = self.opts.password if username and password: Credentials.setuser(username, password)
|
def setup_connections(self):
    """
    Install any credentials supplied on the command line, then build the
    server connections so they pick those credentials up.
    """
    # first take into account the new credentials
    # NOTE(review): setuser is invoked even when no credentials were
    # supplied -- confirm it tolerates None username/password.
    Credentials.setuser(self.opts.username, self.opts.password)
    self.authconn = UserConnection()
|
self.rapi = productApi()
|
pass
|
def __init__(self):
    # single product-API handle shared by all commands in this core
    self.rapi = productApi()
|
global _cert_file, key_file
|
global _cert_file, _key_file
|
def set_cert_key_files(cert_file, key_file):
    """
    Record the SSL certificate and key file paths in module-level state.

    @param cert_file: path to the SSL certificate file
    @param key_file: path to the SSL private key file
    @raise AssertionError: if either path is None
    """
    # Bug fix: the second name in the global statement was the parameter
    # 'key_file' instead of '_key_file', so the assignment below only
    # bound a function local and the module-level _key_file was never set.
    global _cert_file, _key_file
    assert None not in (cert_file, key_file)
    _cert_file = cert_file
    _key_file = key_file
|
role = _role_api.role(authorization.super_user_role)
|
role = self.role_api.role(authorization.super_user_role)
|
def test_super_users(self):
    # the built-in super-users role must exist after initialization
    # NOTE(review): sibling code reaches role APIs through self; confirm
    # the module-level _role_api fixture is the intended handle here.
    role = _role_api.role(authorization.super_user_role)
    self.assertFalse(role is None)
|
(s, authorization.super_user_role, [n]))
|
s, authorization.super_user_role, [n])
|
def test_super_users_grant(self):
    """
    Granting a permission to the built-in super-users role must raise.
    """
    s = self._create_resource()
    n = authorization.operation_to_name(authorization.READ)
    # Bug fix: the arguments were wrapped in a tuple, so assertRaises
    # called the function with ONE tuple argument instead of three
    # separate ones and never exercised the intended code path.
    self.assertRaises(authorization.PulpAuthorizationError,
                      authorization.grant_permission_to_role,
                      s, authorization.super_user_role, [n])
|
(s, authorization.super_user_role, [n]))
|
s, authorization.super_user_role, [n])
|
def test_super_users_revoke(self):
    """
    Revoking a permission from the built-in super-users role must raise.
    """
    s = self._create_resource()
    n = authorization.operation_to_name(authorization.READ)
    # Bug fix: pass the callable's arguments positionally; wrapping them
    # in a tuple invoked the callable with a single tuple argument.
    self.assertRaises(authorization.PulpAuthorizationError,
                      authorization.revoke_permission_from_role,
                      s, authorization.super_user_role, [n])
|
(authorization.super_user_role, u['name']))
|
authorization.super_user_role, u['name'])
|
def test_super_users_remove(self):
    """
    Removing a user from the super-users role must raise when it would
    leave the role in an invalid state.
    """
    u = self._create_user()
    authorization.add_user_to_role(authorization.super_user_role, u['name'])
    # Bug fix: arguments were wrapped in a tuple, so assertRaises called
    # remove_user_from_role with one tuple argument instead of two.
    self.assertRaises(authorization.PulpAuthorizationError,
                      authorization.remove_user_from_role,
                      authorization.super_user_role, u['name'])
|
role = _role_api.role(authorization.consumer_users_role)
|
role = self.role_api.role(authorization.consumer_users_role)
|
def test_consumer_users(self):
    # the built-in consumer-users role must exist after initialization
    # NOTE(review): sibling code reaches role APIs through self; confirm
    # the module-level _role_api fixture is the intended handle here.
    role = _role_api.role(authorization.consumer_users_role)
    self.assertFalse(role is None)
|
(s, authorization.consumer_users_role, [n]))
|
s, authorization.consumer_users_role, [n])
|
def test_consumer_users_grant(self):
    """
    Granting a permission to the built-in consumer-users role must raise.
    """
    s = self._create_resource()
    n = authorization.operation_to_name(authorization.READ)
    # Bug fix: pass the callable's arguments positionally; wrapping them
    # in a tuple invoked the callable with a single tuple argument.
    self.assertRaises(authorization.PulpAuthorizationError,
                      authorization.grant_permission_to_role,
                      s, authorization.consumer_users_role, [n])
|
(s, authorization.consumer_users_role, [n]))
|
s, authorization.consumer_users_role, [n])
|
def test_consumer_users_revoke(self):
    """
    Revoking a permission from the built-in consumer-users role must raise.
    """
    s = self._create_resource()
    n = authorization.operation_to_name(authorization.READ)
    # Bug fix: pass the callable's arguments positionally; wrapping them
    # in a tuple invoked the callable with a single tuple argument.
    self.assertRaises(authorization.PulpAuthorizationError,
                      authorization.revoke_permission_from_role,
                      s, authorization.consumer_users_role, [n])
|
Get the configured auditing lifeteime as a datetime.timedelta instance.
|
Get the configured auditing lifetime as a datetime.timedelta instance.
|
def _get_lifetime():
    """
    Get the configured auditing lifetime as a datetime.timedelta instance.

    @return: datetime.timedelta instance built from the 'lifetime'
        (in days) entry of the 'auditing' config section
    """
    days = config.config.getint('auditing', 'lifetime')
    return datetime.timedelta(days=days)
|
self.assertTrue(path in found[i])
|
self.assertTrue(path in found)
|
def test_repo_gpgkeys(self):
    """
    Adding gpg keys to a repo must make them listable under the repo's
    relative path, for both multi-key and single-key adds.
    """
    id = 'fedora'
    relativepath = 'f11/i386'
    feed = 'yum:http://abc.com/%s' % relativepath
    repo = self.rapi.create(id, 'Fedora', 'noarch', feed=feed)
    keyA = ('keyA', 'MY KEY (A) CONTENT')
    keyB = ('keyB', 'MY KEY (B) CONTENT')
    keylist = [keyA, keyB]
    ks = KeyStore(relativepath)
    ks.clean()
    # multiple (2) keys
    self.rapi.addkeys(id, keylist)
    found = self.rapi.listkeys(id)
    for i in range(0, len(keylist)):
        path = os.path.join(relativepath, keylist[i][0])
        # NOTE(review): assumes listkeys preserves insertion order so
        # found[i] matches keylist[i] -- confirm, otherwise assert
        # membership against the whole result instead.
        self.assertTrue(path in found[i])
    # single key
    ks.clean()
    self.rapi.addkeys(id, keylist[1:])
    found = self.rapi.listkeys(id)
    path = os.path.join(relativepath, keylist[1][0])
    self.assertEqual(len(found), 1)
    self.assertEqual(found[0], path)
|
class Login(AuthAction):
|
class Login(AuthAction, Command):
|
def setup_parser(self):
    """
    Register the authentication options shared by auth commands.
    """
    auth_options = (
        ('--username', 'username', _('pulp account username')),
        ('--password', 'password', _('pulp account password')),
    )
    for flag, dest, help_text in auth_options:
        self.parser.add_option(flag, dest=dest, help=help_text)
|
return self.conn.request_post(method, params=repodata)
|
return self.conn.request_put(method, params=repodata)
|
def create(self, id, name, arch, feed, sync_schedule=None):
    """
    Create a repository on the server.

    @param id: unique repository id
    @param name: display name
    @param arch: repository architecture
    @param feed: source feed url (e.g. yum:http://...)
    @param sync_schedule: optional sync schedule string
    @return: decoded server response
    """
    method = "/repositories/"
    repodata = {"id" : id,
                "name" : name,
                "arch" : arch,
                "feed" : feed,
                "sync_schedule" : sync_schedule,}
    return self.conn.request_post(method, params=repodata)
|
return self.conn.request_post(method, params=repo)
|
return self.conn.request_put(method, params=repo)
|
def update(self, repo):
    """
    Push changes to an existing repository back to the server.

    @param repo: repository dict previously fetched from the server
    @return: decoded server response
    """
    # NOTE(review): the update is sent as POST; if the server routes
    # updates via PUT this needs request_put instead -- confirm against
    # the server-side API.
    method = "/repositories/%s/" % repo['id']
    return self.conn.request_post(method, params=repo)
|
return self.conn.request_get(method)
|
return self.conn.request_post(method)
|
def sync(self, repoid):
    """
    Trigger a sync of the given repository.

    @param repoid: id of the repository to sync
    @return: decoded server response
    """
    # NOTE(review): a state-changing trigger issued as GET; confirm the
    # server does not expect POST for this endpoint.
    method = "/repositories/%s/sync/" % repoid
    return self.conn.request_get(method)
|
return self.conn.request_get(method)
|
return self.conn.request_post(method)
|
def packages(self, repoid):
    """
    List the packages in the given repository.

    @param repoid: id of the repository to list
    @return: decoded server response
    """
    method = "/repositories/%s/list/" % repoid
    return self.conn.request_get(method)
|
return self.conn.request_post(method, params=consumerdata)
|
return self.conn.request_put(method, params=consumerdata)
|
def create(self, id, description):
    """
    Register a consumer on the server.

    @param id: unique consumer id
    @param description: free-form consumer description
    @return: decoded server response
    """
    consumerdata = {"id" : id,
                    "description" : description}
    method = "/consumers/"
    return self.conn.request_post(method, params=consumerdata)
|
return self.conn.request_post(method, params=consumer)
|
return self.conn.request_put(method, params=consumer)
|
def update(self, consumer):
    """
    Push changes to an existing consumer back to the server.

    @param consumer: consumer dict previously fetched from the server
    @return: decoded server response
    """
    # NOTE(review): POST used for an update; see the repository update
    # method -- if the server expects PUT, both need changing together.
    method = "/consumers/%s/" % consumer['id']
    return self.conn.request_post(method, params=consumer)
|
return self.conn.request_get(method)
|
return self.conn.request_post(method)
|
def packages(self, id):
    """
    List the packages installed on the given consumer.

    @param id: consumer id (coerced to str for the URL)
    @return: decoded server response
    """
    method = "/consumers/%s/packages/" % str(id)
    return self.conn.request_get(method)
|
return self.conn.request_post(method, params=repodata)
|
return self.conn.request_put(method, params=repodata)
|
def create(self, name, epoch, version, release, arch, description, checksum_type, checksum, filename):
    """
    Create a package record on the server from its NEVRA, checksum and
    filename fields.

    @return: decoded server response
    """
    method = "/packages/"
    repodata = {"name" : name,
                "epoch" : epoch,
                "version" : version,
                "release" : release,
                "arch" : arch,
                "description" : description,
                "checksum_type" : checksum_type,
                "checksum": checksum,
                "filename": filename,}
    return self.conn.request_post(method, params=repodata)
|
repo["packages"] = _pkg_count(repo["packages"])
|
repo["packages"] = len(repo["packages"])
|
def _list(self):
    """
    Print a table of all repositories known to the server.
    """
    (self.options, self.args) = self.parser.parse_args()
    try:
        repos = self.pconn.repositories()
        columns = ["id", "name", "source", "arch", "packages"]
        data = [ _sub_dict(repo, columns) for repo in repos]
        if not len(data):
            print _("No repos available to list")
            sys.exit(0)
        print """+-------------------------------------------+\n List of Available Repositories \n+-------------------------------------------+"""
        for repo in data:
            # collapse the per-repo package dict into a displayable count
            repo["packages"] = _pkg_count(repo["packages"])
            print constants.AVAILABLE_REPOS_LIST % (repo["id"], repo["name"],
                                                    repo["source"], repo["arch"],
                                                    repo["packages"] )
    except RestlibException, re:
        log.error("Error: %s" % re)
        systemExit(re.code, re.msg)
    except Exception, e:
        log.error("Error: %s" % e)
        raise
|
pkg_count = _pkg_count(packages)
|
pkg_count = len(packages)
|
def _sync(self):
    """
    Trigger a sync of the repo given by --label and report the resulting
    package count on success.
    """
    (self.options, self.args) = self.parser.parse_args()
    if not self.options.label:
        print("repo label required. Try --help")
        sys.exit(0)
    try:
        status = self.pconn.sync(self.options.label)
        if status:
            packages = self.pconn.packages(self.options.label)
            pkg_count = _pkg_count(packages)
            print _(" Sync Successful. Repo [ %s ] now has a total of [ %s ] packages" % (self.options.label, pkg_count))
    except RestlibException, re:
        log.error("Error: %s" % re)
        systemExit(re.code, re.msg)
    except Exception, e:
        log.error("Error: %s" % e)
        raise
|
def _pkg_count(pkgdict):
    """
    Count the total number of package versions in a repo package dict.

    @param pkgdict: mapping of package name to a dict holding a
        "versions" list
    @return: sum of the lengths of all "versions" lists
    """
    # Iterate values directly (the keys were unused) and let sum() do
    # the accumulation instead of a manual counter.
    return sum(len(value["versions"]) for value in pkgdict.values())
|
def _upload(self): (self.options, files) = self.parser.parse_args() # ignore the command and pick the files files = files[2:] if not self.options.label: print("repo label required. Try --help") sys.exit(0) if self.options.dir: files += utils.processDirectory(self.options.dir, "rpm") if not files: print("Need to provide atleast one file to perform upload") sys.exit(0) uploadinfo = {} uploadinfo['repo'] = self.options.label for frpm in files: try: pkginfo = utils.processFile(frpm) except FileError, e: print('Error: %s' % e) continue if not pkginfo.has_key('nvrea'): print("Package %s is Not an RPM Skipping" % frpm) continue pkgstream = base64.b64encode(open(frpm).read()) try: status = self.pconn.upload(self.options.label, pkginfo, pkgstream) if status: print _(" Successful uploaded [%s] to Repo [ %s ] " % (pkginfo['pkgname'], self.options.label)) else: print _(" Failed to Upload %s to Repo [ %s ] " % self.options.label) except RestlibException, re: log.error("Error: %s" % re) raise #continue except Exception, e: log.error("Error: %s" % e) raise #continue
|
|
cmd = "createrepo --update %s" % (dir) if groups: cmd = "createrepo -g %s --update %s" % (groups, dir)
|
cmd = "createrepo -g %s --update %s" % (groups, dir) if not groups: cmd = "createrepo --update %s" % (dir) repodata_file = os.path.join(dir, "repodata", "repomd.xml") if os.path.isfile(repodata_file): log.info("Checking what metadata types are available: %s" % \ (util.get_repomd_filetypes(repodata_file))) if "group" in util.get_repomd_filetypes(repodata_file): comps_file = util.get_repomd_filetype_path( repodata_file, "group") comps_file = os.path.join(dir, comps_file) if comps_file and os.path.isfile(comps_file): cmd = "createrepo -g %s --update %s" % (comps_file, dir)
|
def create_repo(dir, groups=None):
    """
    Run createrepo over the given directory, optionally with a comps
    (package groups) file.

    @param dir: directory to generate repodata for
    @param groups: optional path to a comps.xml groups file
    @raise CreateRepoError: if createrepo exits non-zero
    @return: (exit status, combined output) from createrepo
    """
    # NOTE(security): the command line is built by string interpolation
    # and executed through a shell; dir/groups containing shell
    # metacharacters would be interpreted -- confirm inputs are trusted.
    cmd = "createrepo --update %s" % (dir)
    if groups:
        cmd = "createrepo -g %s --update %s" % (groups, dir)
    status, out = commands.getstatusoutput(cmd)
    if status != 0:
        log.error("createrepo on %s failed" % dir)
        raise CreateRepoError(out)
    log.info("createrepo on %s finished" % dir)
    return status, out
|
log.info("createrepo on %s finished" % dir)
|
log.info("[%s] on %s finished" % (cmd, dir))
|
def create_repo(dir, groups=None): cmd = "createrepo --update %s" % (dir) if groups: cmd = "createrepo -g %s --update %s" % (groups, dir) status, out = commands.getstatusoutput(cmd) if status != 0: log.error("createrepo on %s failed" % dir) raise CreateRepoError(out) log.info("createrepo on %s finished" % dir) return status, out
|
'i386', 'yum:http://example.com/mypath', groupid="testgroup")
|
'i386', 'yum:http://example.com/mypath', groupid=["testgroup"])
|
def test_repository_with_groupid(self):
    # NOTE(review): groupid is passed as a plain string but asserted back
    # as a one-element list -- presumably create() normalizes scalars to
    # lists; confirm against the repo API.
    repo = self.rapi.create('some-id', 'some name', \
        'i386', 'yum:http://example.com/mypath', groupid="testgroup")
    found = self.rapi.repository('some-id')
    assert(found is not None)
    assert(found['id'] == 'some-id')
    assert(found['groupid'] == ["testgroup"])
|
self.debug = 0
|
def __init__(self, name="cli", usage=None, shortdesc=None, description=None):
    """
    Base command driver: set up the option parser and the options common
    to every sub-command.

    @param name: sub-command name ("cli" marks the base class itself)
    @param usage: optparse usage string
    @param shortdesc: one-line description of the command
    @param description: long description; defaults to shortdesc
    """
    self.shortdesc = shortdesc
    # fall back to the short description when no long one was given
    if shortdesc is not None and description is None:
        description = shortdesc
    self.debug = 0
    self.parser = OptionParser(usage=usage, description=description)
    self._add_common_options()
    self.name = name
    # number of interrupt signals received; used for kill handling
    self.killcount = 0
    #GrinderLog.setup(self.debug)
|
|
default=0, help="debug level")
|
action="store_true", help="enable debug logging")
|
def _add_common_options(self):
    """
    Add options that apply to all sub-commands.
    """
    # NOTE(review): default=0 with no type= means a value supplied on the
    # command line arrives as a string -- confirm callers treat the debug
    # level consistently.
    self.parser.add_option("--debug", dest="debug",
                           default=0, help="debug level")
|
GrinderLog.setup(self.debug)
|
def __init__(self):
    """
    Sub-command that fetches content from an RHN source.
    """
    usage = "usage: %prog rhn [OPTIONS]"
    shortdesc = "Fetches content from a rhn source."
    desc = "rhn"
    CliDriver.__init__(self, "rhn", usage, shortdesc, desc)
    # configure logging with the debug level set up by the base class
    GrinderLog.setup(self.debug)
    self.rhnSync = RHNSync()
|
|
GrinderLog.setup(self.debug)
|
def __init__(self):
    """
    Sub-command that fetches content from a yum repo.
    """
    usage = "usage: %prog yum [OPTIONS]"
    shortdesc = "Fetches content from a yum repo."
    desc = "yum"
    CliDriver.__init__(self, "yum", usage, shortdesc, desc)
    # configure logging with the debug level set up by the base class
    GrinderLog.setup(self.debug)
|
|
def test_resync_removes_deleted_package(self):
|
def disabled_resync_removes_deleted_package(self):
|
def test_resync_removes_deleted_package(self):
    """
    A re-sync against a source that dropped one of its packages must
    remove that package from the repo.
    """
    # Since a repo with 3 packages, simulate the repo source deleted 1 package
    # Re-sync ensure we delete the removed package
    repo_path = os.path.join(self.data_path, "repo_resync_a")
    r = self.rapi.create('test_resync_removes_deleted_package', 'test_name',
                         'x86_64', 'local:file://%s' % (repo_path))
    self.assertTrue(r != None)
    self.rapi.sync(r["id"])
    # Refresh object now it's been sync'd
    r = self.rapi.repository(r['id'])
    self.assertTrue(len(r["packages"]) == 3)
    expected_packages = ["pulp-dot-2.0-test-0.1.2-1.fc11.x86_64.rpm",
                         "pulp-test-package-0.2.1-1.fc11.x86_64.rpm",
                         "pulp-test-package-0.3.1-1.fc11.x86_64.rpm"]
    for ep in expected_packages:
        found = False
        for p in r["packages"].values():
            if p["filename"] == ep:
                found = True
        self.assertTrue(found)
    # Simulate a change that a package was deleted
    repo_path = os.path.join(self.data_path, "repo_resync_b")
    r = self.rapi.repository(r["id"])
    r["source"] = RepoSource("local:file://%s" % (repo_path))
    self.rapi.update(r)
    self.rapi.sync(r["id"])
    #Refresh Repo Object and Verify Changes
    r = self.rapi.repository(r["id"])
    self.assertTrue(len(r["packages"]) == 2)
    removed_package = "pulp-dot-2.0-test-0.1.2-1.fc11.x86_64.rpm"
    expected_packages = ["pulp-test-package-0.2.1-1.fc11.x86_64.rpm",
                         "pulp-test-package-0.3.1-1.fc11.x86_64.rpm"]
    for ep in expected_packages:
        found = False
        for p in r["packages"].values():
            if p["filename"] == ep:
                found = True
        self.assertTrue(found)
    for p in r["packages"].values():
        self.assertTrue(p["filename"] != removed_package)
|
ldapuser = config.get("ldap", "user") ldappass = config.get("ldap", "password") ldapserv = LDAPConnection(ldapuser, ldappass, ldapserver)
|
ldapserv = LDAPConnection(ldapserver)
|
def check_user_pass_on_ldap(self, username, password=None): ''' verify the credentials for user on ldap server. @param username: Userid to be validated on ldap server @param password: password credentials for userid @return: user instance of the authenticated user if valid credentials were specified; None otherwise @rtype: L{pulp.server.db.model.User} ''' if not config.has_section("ldap"): LOG.info("No external ldap server available") return ldapserver = config.get("ldap", "uri") base = config.get("ldap", "base") ldapuser = config.get("ldap", "user") ldappass = config.get("ldap", "password")
|
compspath = "./data/rhel-i386-server-5/comps.xml"
|
compspath = os.path.join(self.dataPath, "rhel-i386-server-5/comps.xml")
|
def test_import_groups_data(self): repo = self.rapi.create('test_import_groups_data_id', 'test_import_groups_data_id', 'i386', 'yum:http://example.com/') # Parse existing comps.xml compspath = "./data/rhel-i386-server-5/comps.xml" compsfile = open(compspath) base = BaseSynchronizer(self.config) base.import_groups_data(compsfile, repo) # 'repo' object should now contain groups/categories # we need to save it to the db so we can query from it self.rapi.update(repo) # Testing for expected values found = self.rapi.packagegroup(repo['id'], "web-server") self.assertTrue(found != None) self.assertTrue("httpd" in found['mandatory_package_names']) self.assertTrue("mod_auth_kerb" in found['optional_package_names']) self.assertTrue("mod_auth_mysql" in found['optional_package_names']) self.assertTrue("crypto-utils" in found['default_package_names']) self.assertTrue("distcache" in found['default_package_names']) # PackageGroupCategory, look up expected values, found = self.rapi.packagegroupcategory(repo['id'], "BAD_VALUE_NOT_IN_CATEGORY") self.assertTrue(found == None) found = self.rapi.packagegroupcategory(repo['id'], "development") self.assertTrue(found != None)
|
self.options.arch - "noarch"
|
self.options.arch = "noarch"
|
def _create(self): (self.options, self.args) = self.parser.parse_args() if not self.options.label: print("repo label required. Try --help") sys.exit(0) if not self.options.name: self.options.name = self.options.label if not self.options.arch: self.options.arch - "noarch" if not self.options.feed: print("repo feed required. Try --help") sys.exit(0) repoinfo = {"id" : self.options.label, "name" : self.options.name, "arch" : self.options.arch, "feed" : self.options.feed,} try: repo = self.pconn.create(repoinfo) print _(" Successfully created Repo [ %s ] with feed [ %s ]" % (repo['id'], repo["source"])) except Exception, e: log.error("Error: %s" % e) raise
|
for clazz in [ RepoCore, ConsumerCore]: cmd = clazz() if cmd.name != "cli": self.cli_cores[cmd.name] = cmd
|
if len(sys.argv) > 2 and sys.argv[1] == "repo": self.cli_cores["repo"] = RepoCore() elif len(sys.argv) > 2 and sys.argv[1] == "consumer": self.cli_cores["consumer"] = ConsumerCore() else: for clazz in [ RepoCore, ConsumerCore]: cmd = clazz() if cmd.name != "cli": self.cli_cores[cmd.name] = cmd
|
def __init__(self):
    """
    Build the mapping of sub-command name -> command core instance.
    """
    self.cli_cores = {}
    for clazz in [ RepoCore, ConsumerCore]:
        cmd = clazz()
        # ignore the base class
        if cmd.name != "cli":
            self.cli_cores[cmd.name] = cmd
|
t.reset(args=[t.id])
|
t.args=[t.id]
|
def test_task(self):
    # run a task whose only argument is its own id, then wait briefly
    # and verify it finished
    t = Task(thread_id)
    print 'task id: %s' % str(t.id)
    t.reset(args=[t.id])
    t.run()
    # NOTE(review): a fixed 0.5ms sleep makes this timing-sensitive on
    # slow machines -- consider polling for FINISHED instead.
    time.sleep(0.0005)
    self.assertTrue(t.status == FINISHED)
|
t.reset(args=[t.id])
|
t.args=[t.id]
|
def test_multi_runs(self):
    # a task must be runnable more than once, finishing cleanly each time
    t = Task(thread_id)
    print 'task id: %s' % str(t.id)
    t.reset(args=[t.id])
    t.run()
    # NOTE(review): fixed 0.5ms sleeps make this timing-sensitive on
    # slow machines -- consider polling for FINISHED instead.
    time.sleep(0.0005)
    self.assertTrue(t.status == FINISHED)
    t.run()
    time.sleep(0.0005)
    self.assertTrue(t.status == FINISHED)
|
c.packageids.append(package["packageid"])
|
repo = self.rapi.create('some-id', 'some name', 'i386', 'yum:http://example.com')
|
def test_consumerwithpackage(self):
    # NOTE(review): 'package' is referenced on the next line before it is
    # ever assigned (NameError at runtime), and 'repo.id' below should
    # presumably be repo['id'] like the other call sites -- this test
    # cannot run as written; confirm the intended setup order.
    c = self.capi.create('test-consumer', 'some consumer desc')
    c.packageids.append(package["packageid"])
    for i in range(10):
        repo = self.rapi.create('some-id','some name', 'i386',
                                'yum:http://example.com')
        package = self.rapi.create_package(repo.id, 'test_consumerwithpackage',
                                           'test package search')
        repo = self.rapi.repository(repo["id"])
        c.packageids.append(package["packageid"])
    for i in range(10):
        package = self.rapi.create_package(repo['id'], randomString(), randomString())
        c.packageids.append(package["packageid"])
    self.capi.update(c)
    found = self.capi.consumerswithpackage('some-invalid-id')
    assert(len(found) == 0)
|
repo = self.rapi.create('some-id','some name', 'i386', 'yum:http://example.com') package = self.rapi.create_package(repo.id, 'test_consumerwithpackage',
|
package = self.rapi.create_package(repo["id"], 'test_consumerwithpackage',
|
def test_consumerwithpackage(self): c = self.capi.create('test-consumer', 'some consumer desc') c.packageids.append(package["packageid"]) for i in range(10): repo = self.rapi.create('some-id','some name', 'i386', 'yum:http://example.com') package = self.rapi.create_package(repo.id, 'test_consumerwithpackage', 'test package search') repo = self.rapi.repository(repo["id"]) c.packageids.append(package["packageid"]) for i in range(10): package = self.rapi.create_package(repo['id'], randomString(), randomString()) c.packageids.append(package["packageid"]) self.capi.update(c) found = self.capi.consumerswithpackage('some-invalid-id') assert(len(found) == 0)
|
repo = self.rapi.repository(repo["id"]) c.packageids.append(package["packageid"]) for i in range(10): package = self.rapi.create_package(repo['id'], randomString(), randomString())
|
repo = self.rapi.repository(repo["id"])
|
def test_consumerwithpackage(self): c = self.capi.create('test-consumer', 'some consumer desc') c.packageids.append(package["packageid"]) for i in range(10): repo = self.rapi.create('some-id','some name', 'i386', 'yum:http://example.com') package = self.rapi.create_package(repo.id, 'test_consumerwithpackage', 'test package search') repo = self.rapi.repository(repo["id"]) c.packageids.append(package["packageid"]) for i in range(10): package = self.rapi.create_package(repo['id'], randomString(), randomString()) c.packageids.append(package["packageid"]) self.capi.update(c) found = self.capi.consumerswithpackage('some-invalid-id') assert(len(found) == 0)
|
self.__dispatcher.daemon = True
|
self.__dispatcher.setDaemon(True)
|
def __init__(self, max_running=4, finished_lifetime=timedelta(seconds=3600)):
    """
    @type max_running: int
    @param max_running: maximum number of tasks to run simultaneously
                        None means indefinitely
    @type finished_lifetime: datetime.timedelta instance
    @param finished_lifetime: length of time to keep finished tasks
    @return: FIFOTaskQueue instance
    """
    # NOTE(review): only the two public settings are initialized here;
    # the lock/condition/dispatcher used by _dispatch are presumably set
    # up elsewhere (or in a fuller version of this __init__) -- confirm.
    self.max_running = max_running
    self.finished_lifetime = finished_lifetime
|
while True: self.__condition.wait(self.__dispatcher_timeout) for task in self._get_tasks(): self.run(task) self._cancel_tasks() self._timeout_tasks() self._cull_tasks() except Exception: _log.critical('Exception in FIFO Queue Dispatch Thread\n%s' % ''.join(traceback.format_exception(*sys.exc_info()))) raise self.__lock.release()
|
try: while True: self.__condition.wait(self.__dispatcher_timeout) for task in self._get_tasks(): self.run(task) self._cancel_tasks() self._timeout_tasks() self._cull_tasks() except Exception: _log.critical('Exception in FIFO Queue Dispatch Thread\n%s' % ''.join(traceback.format_exception(*sys.exc_info()))) raise finally: self.__lock.release()
|
def _dispatch(self):
    """
    Scheduling method that that executes the scheduling hooks.

    Runs forever while holding the queue lock, waking every
    __dispatcher_timeout seconds to start pending tasks and to cancel,
    time out, and cull existing ones.
    """
    self.__lock.acquire()
    try:
        while True:
            self.__condition.wait(self.__dispatcher_timeout)
            for task in self._get_tasks():
                self.run(task)
            self._cancel_tasks()
            self._timeout_tasks()
            self._cull_tasks()
    except Exception:
        _log.critical('Exception in FIFO Queue Dispatch Thread\n%s' %
                      ''.join(traceback.format_exception(*sys.exc_info())))
        raise
    finally:
        # Bug fix: the release was previously unreachable on the
        # exception path (the 'finally:' had been commented out), leaking
        # the queue lock and deadlocking every later acquire.
        self.__lock.release()
|
method = "/repositories/%s" % str(id)
|
method = "/repositories/%s/" % str(id)
|
def repository(self, id):
    """
    Fetch a single repository by id.

    @param id: repository id (coerced to str for the URL)
    @return: decoded server response for the repository
    """
    # Use a trailing slash for consistency with the other repository
    # methods in this client (e.g. update posts to "/repositories/%s/");
    # mixed forms cause avoidable server-side redirects.
    method = "/repositories/%s/" % str(id)
    return self.conn.request_get(method)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.