rem: string, 0–322k chars | add: string, 0–2.05M chars | context: string, 8–228k chars

Each row below pairs the lines a commit removed (rem) with the lines it added (add) and the surrounding code (context). Cells appear in that order, each terminated by a line containing only "|".
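To experiment with rows like the ones that follow, here is a minimal loading sketch using the Hugging Face datasets library. The dataset identifier is a placeholder, not the real repository name:

```python
# Minimal sketch, assuming the table is published as a Hugging Face dataset.
# "user/code-diffs" is a hypothetical identifier; substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/code-diffs", split="train")
row = ds[0]
print(row["rem"])      # lines removed by the commit
print(row["add"])      # lines added by the commit
print(row["context"])  # surrounding function and docstring
```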
@return The string hex representation of the fingerprint
|
@return: The string hex representation of the fingerprint
|
def FormatParamikoFingerprint(fingerprint):
  """Formats the fingerprint of L{paramiko.PKey.get_fingerprint()}

  @type fingerprint: str
  @param fingerprint: PKey fingerprint
  @return The string hex representation of the fingerprint

  """
  assert len(fingerprint) % 2 == 0
  return ":".join(re.findall(r"..", fingerprint.lower()))
|
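A quick usage sketch of the function shown in the context cell above, fed a made-up hex digest string (purely illustrative):

```python
# Illustrative only: the function from the context cell, standalone.
import re

def FormatParamikoFingerprint(fingerprint):
  assert len(fingerprint) % 2 == 0
  return ":".join(re.findall(r"..", fingerprint.lower()))

print(FormatParamikoFingerprint("AA0F3C22"))  # -> "aa:0f:3c:22"
```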
my_groups = utils.NiceSort(all_groups.keys())
|
sorted_names = utils.NiceSort(name_to_uuid.keys())
my_groups = [name_to_uuid[n] for n in sorted_names]
|
def Exec(self, feedback_fn): """Computes the list of groups and their attributes.
|
name_to_uuid = dict((g.name, g.uuid) for g in all_groups.values())
|
def Exec(self, feedback_fn): """Computes the list of groups and their attributes.
|
|
for name in my_groups:
  group = all_groups[name]
|
for uuid in my_groups:
  group = all_groups[uuid]
|
def Exec(self, feedback_fn): """Computes the list of groups and their attributes.
|
disks = [{"size": d.size, "vg": d.vg} for d in instance.disks]
|
assert instance.disk_template == constants.DT_PLAIN
disks = [{"size": d.size, "vg": d.logical_id[0]} for d in instance.disks]
|
def CheckPrereq(self): """Check prerequisites.
|
strict_host_check, private_key=None):
|
strict_host_check, private_key=None, quiet=True):
|
def _BuildSshOptions(self, batch, ask_key, use_cluster_key, strict_host_check, private_key=None): """Builds a list with needed SSH options.
|
private_key=None):
|
private_key=None, quiet=True):
|
def BuildCmd(self, hostname, user, command, batch=True, ask_key=False, tty=False, use_cluster_key=True, strict_host_check=True, private_key=None): """Build an ssh command to execute a command on a remote node.
|
argv = [constants.SSH, "-q"]
|
argv = [constants.SSH]
|
def BuildCmd(self, hostname, user, command, batch=True, ask_key=False, tty=False, use_cluster_key=True, strict_host_check=True, private_key=None): """Build an ssh command to execute a command on a remote node.
|
strict_host_check, private_key))
|
strict_host_check, private_key, quiet=quiet))
|
def BuildCmd(self, hostname, user, command, batch=True, ask_key=False, tty=False, use_cluster_key=True, strict_host_check=True, private_key=None): """Build an ssh command to execute a command on a remote node.
|
command = [constants.SCP, "-q", "-p"]
|
command = [constants.SCP, "-p"]
|
def CopyFileToNode(self, node, filename): """Copy a file to another node with scp.
|
self.mainloop.scheduler.enter(0.1, 1, self._SendSig, [signal.SIGCHLD])
handle1 = self.mainloop.scheduler.enter(0.3, 2, self._SendSig, [signal.SIGCHLD])
handle2 = self.mainloop.scheduler.enter(0.4, 2, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enter(0, 1, self._CancelEvent, [handle1])
self.mainloop.scheduler.enter(0, 1, self._CancelEvent, [handle2])
|
now = time.time()
self.mainloop.scheduler.enterabs(now + 0.1, 1, self._SendSig, [signal.SIGCHLD])
handle1 = self.mainloop.scheduler.enterabs(now + 0.3, 2, self._SendSig, [signal.SIGCHLD])
handle2 = self.mainloop.scheduler.enterabs(now + 0.4, 2, self._SendSig, [signal.SIGCHLD])
self.mainloop.scheduler.enterabs(now + 0.2, 1, self._CancelEvent, [handle1])
self.mainloop.scheduler.enterabs(now + 0.2, 1, self._CancelEvent, [handle2])
|
def testDeferredCancel(self):
  self.mainloop.RegisterSignal(self)
  self.mainloop.scheduler.enter(0.1, 1, self._SendSig, [signal.SIGCHLD])
  handle1 = self.mainloop.scheduler.enter(0.3, 2, self._SendSig, [signal.SIGCHLD])
  handle2 = self.mainloop.scheduler.enter(0.4, 2, self._SendSig, [signal.SIGCHLD])
  self.mainloop.scheduler.enter(0, 1, self._CancelEvent, [handle1])
  self.mainloop.scheduler.enter(0, 1, self._CancelEvent, [handle2])
  self.mainloop.scheduler.enter(0.5, 1, self._SendSig, [signal.SIGTERM])
  self.mainloop.Run()
  self.assertEquals(self.sendsig_events, [signal.SIGCHLD, signal.SIGTERM])
  self.assertEquals(self.onsignal_events, self.sendsig_events)
|
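The row above swaps scheduler.enter (delays relative to call time) for scheduler.enterabs (absolute times computed from a single time.time() reading), so the cancel events are guaranteed to fire between setup and the events they cancel. A standalone Python 3 sketch of the same idea with the stdlib sched module, unrelated to Ganeti's mainloop:

```python
# Sketch: anchoring all events to one absolute timestamp makes their relative
# order deterministic, no matter how long scheduling itself takes.
import sched
import time

s = sched.scheduler(time.time, time.sleep)
now = time.time()
handle = s.enterabs(now + 0.3, 1, print, argument=("event at t+0.3",))
s.enterabs(now + 0.1, 1, s.cancel, argument=(handle,))  # cancelled first
s.enterabs(now + 0.4, 1, print, argument=("event at t+0.4",))
s.run()  # prints only "event at t+0.4"
```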
mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
if not mac_check.match(mac):
|
if not _MAC_CHECK.match(mac):
|
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  mac_check = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
  if not mac_check.match(mac):
    raise errors.OpPrereqError("Invalid MAC address specified: %s" % mac,
                               errors.ECODE_INVAL)
  return mac.lower()
|
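The replacement line in this row references a module-level _MAC_CHECK constant that is not shown in these cells; presumably the regex was hoisted out of the function so it is compiled once at import time, along the lines of:

```python
# Assumed module-level constant (not shown in the cells above): the same
# pattern as the old in-function compile, built once at import time.
import re

_MAC_CHECK = re.compile("^([0-9a-f]{2}(:|$)){6}$", re.I)
```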
if len(ial.nodes) != ial.required_nodes:
|
if len(ial.result) != ial.required_nodes:
|
def _RunAllocator(self): """Run the allocator based on input opcode.
|
(self.op.iallocator, len(ial.nodes),
|
(self.op.iallocator, len(ial.result),
|
def _RunAllocator(self): """Run the allocator based on input opcode.
|
self.op.pnode = ial.nodes[0]
|
self.op.pnode = ial.result[0]
|
def _RunAllocator(self): """Run the allocator based on input opcode.
|
utils.CommaJoin(ial.nodes))
|
utils.CommaJoin(ial.result))
|
def _RunAllocator(self): """Run the allocator based on input opcode.
|
self.op.snode = ial.nodes[1]
|
self.op.snode = ial.result[1]
|
def _RunAllocator(self): """Run the allocator based on input opcode.
|
if len(ial.nodes) != ial.required_nodes:
|
if len(ial.result) != ial.required_nodes:
|
def _RunAllocator(lu, iallocator_name, instance_name, relocate_from): """Compute a new secondary node using an IAllocator.
|
len(ial.nodes), ial.required_nodes),
|
len(ial.result), ial.required_nodes),
|
def _RunAllocator(lu, iallocator_name, instance_name, relocate_from): """Compute a new secondary node using an IAllocator.
|
remote_node_name = ial.nodes[0]
|
remote_node_name = ial.result[0]
|
def _RunAllocator(lu, iallocator_name, instance_name, relocate_from): """Compute a new secondary node using an IAllocator.
|
self.success = self.info = self.nodes = None
|
self.success = self.info = self.result = None
|
def __init__(self, cfg, rpc, mode, **kwargs):
  self.cfg = cfg
  self.rpc = rpc
  # init buffer variables
  self.in_text = self.out_text = self.in_data = self.out_data = None
  # init all input fields so that pylint is happy
  self.mode = mode
  self.mem_size = self.disks = self.disk_template = None
  self.os = self.tags = self.nics = self.vcpus = None
  self.hypervisor = None
  self.relocate_from = None
  self.name = None
  # computed fields
  self.required_nodes = None
  # init result fields
  self.success = self.info = self.nodes = None
  if self.mode == constants.IALLOCATOR_MODE_ALLOC:
    keyset = self._ALLO_KEYS
    fn = self._AddNewInstance
  elif self.mode == constants.IALLOCATOR_MODE_RELOC:
    keyset = self._RELO_KEYS
    fn = self._AddRelocateInstance
  else:
    raise errors.ProgrammerError("Unknown mode '%s' passed to the"
                                 " IAllocator" % self.mode)
  for key in kwargs:
    if key not in keyset:
      raise errors.ProgrammerError("Invalid input parameter '%s' to"
                                   " IAllocator" % key)
    setattr(self, key, kwargs[key])
  for key in keyset:
    if key not in kwargs:
      raise errors.ProgrammerError("Missing input parameter '%s' to"
                                   " IAllocator" % key)
  self._BuildInputData(fn)
|
for key in "success", "info", "nodes":
|
if "nodes" in rdict and "result" not in rdict: rdict["result"] = rdict["nodes"] del rdict["nodes"] for key in "success", "info", "result":
|
def _ValidateResult(self): """Process the allocator results.
|
if not isinstance(rdict["nodes"], list): raise errors.OpExecError("Can't parse iallocator results: 'nodes' key"
|
if not isinstance(rdict["result"], list): raise errors.OpExecError("Can't parse iallocator results: 'result' key"
|
def _ValidateResult(self): """Process the allocator results.
|
raise Exception("No opcodes")
|
raise errors.GenericError("A job needs at least one opcode")
|
def __init__(self, queue, job_id, ops): """Constructor for the _QueuedJob.
|
desc = '%r' % fn
now = str(datetime.datetime.now())
|
desc = "%r" % fn desc = desc.rstrip(".") tstart = datetime.datetime.now()
|
def RunTest(fn, *args):
  """Runs a test after printing a header.

  """
  if fn.__doc__:
    desc = fn.__doc__.splitlines()[0].strip()
  else:
    desc = '%r' % fn
  now = str(datetime.datetime.now())
  print
  print '---', now, ('-' * (55 - len(now)))
  print desc
  print '-' * 60
  return fn(*args)
|
print '---', now, ('-' * (55 - len(now)))
print desc
print '-' * 60
return fn(*args)
|
print _FormatHeader("%s start %s" % (tstart, desc))
try:
  retval = fn(*args)
  return retval
finally:
  tstop = datetime.datetime.now()
  tdelta = tstop - tstart
  print _FormatHeader("%s time=%s %s" % (tstop, tdelta, desc))
|
def RunTest(fn, *args):
  """Runs a test after printing a header.

  """
  if fn.__doc__:
    desc = fn.__doc__.splitlines()[0].strip()
  else:
    desc = '%r' % fn
  now = str(datetime.datetime.now())
  print
  print '---', now, ('-' * (55 - len(now)))
  print desc
  print '-' * 60
  return fn(*args)
|
msg = "hostname mistmatch"
|
msg = "hostname mismatch"
|
def VerifyNodeHostname(self, node): """Verify hostname consistency via SSH.
|
lu_result = 1
|
lu_result = 0
|
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result): """Analyze the post-hooks' result
|
return self._checkIntVariable('lock')
|
return bool(self._checkIntVariable("lock"))
|
def useLocking(self): """Check if the request specifies locking.
|
return self._checkIntVariable('bulk')
|
return bool(self._checkIntVariable("bulk"))
|
def useBulk(self): """Check if the request specifies bulk querying.
|
return self._checkIntVariable('force')
|
return bool(self._checkIntVariable("force"))
|
def useForce(self): """Check if the request specifies a forced operation.
|
return self._checkIntVariable('dry-run')
|
return bool(self._checkIntVariable("dry-run"))
|
def dryRun(self): """Check if the request specifies dry-run mode.
|
self.proc.LogWarning("Communication failure to node %s" % node_name)
|
self.proc.LogWarning("Communication failure to node %s", node_name)
|
def RunPhase(self, phase): """Run all the scripts for a phase.
|
return utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
|
entry = utils.ReadWatcherPauseFile(constants.WATCHER_PAUSEFILE)
|
def Exec(self, feedback_fn): """Dump a representation of the cluster config to the standard output.
|
logging.error("Cannot set memory lock: %s" %
|
logging.error("Cannot set memory lock: %s",
|
def Mlockall():
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  """
  if ctypes is None:
    logging.warning("Cannot set memory lock, ctypes module not found")
    return

  libc = ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  libc.__errno_location.restype = ctypes.POINTER(ctypes.c_int)

  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    logging.error("Cannot set memory lock: %s" %
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")
|
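The __errno_location trick in the context cell predates ctypes' portable errno support; on Python 2.6+ and 3.x the same check can be written with ctypes.get_errno, as in this sketch (not the code Ganeti shipped; the MCL_* values are the Linux constants):

```python
# Sketch using ctypes' built-in errno support instead of __errno_location.
import ctypes
import os

_MCL_CURRENT = 1  # Linux value of MCL_CURRENT
_MCL_FUTURE = 2   # Linux value of MCL_FUTURE

libc = ctypes.CDLL("libc.so.6", use_errno=True)
if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE) != 0:
  err = ctypes.get_errno()
  print("mlockall failed: %s" % os.strerror(err))
```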
live = bool(self._checkIntVariable("live", default=1))
op = opcodes.OpMigrateNode(node_name=node_name, live=live)
|
if "live" in self.queryargs and "mode" in self.queryargs: raise http.HttpBadRequest("Only one of 'live' and 'mode' should" " be passed") elif "live" in self.queryargs: if self._checkIntVariable("live", default=1): mode = constants.HT_MIGRATION_LIVE else: mode = constants.HT_MIGRATION_NONLIVE else: mode = self._checkStringVariable("mode", default=None) op = opcodes.OpMigrateNode(node_name=node_name, mode=mode)
|
def POST(self): """Migrate all primary instances from a node.
|
sync = threading.Condition()
|
sync = threading.Event()
|
def testMixedAcquireTimeout(self): sync = threading.Condition()
|
sync.acquire()
try:
  sync.wait()
finally:
  sync.release()
|
sync.wait()
|
def _AcquireShared(ev):
  if not self.sl.acquire(shared=1, timeout=None):
    return
|
exclsync = threading.Condition()
|
exclsync = threading.Event()
|
def _AcquireShared(ev):
  if not self.sl.acquire(shared=1, timeout=None):
    return
|
exclsync.acquire()
try:
  exclsync.wait()
finally:
  exclsync.release()
|
exclsync.wait()
|
def _AcquireExclusive():
  if not self.sl.acquire(shared=0):
    return
|
sync.acquire()
try:
  sync.notifyAll()
finally:
  sync.release()
|
sync.set()
|
def _AcquireExclusive():
  if not self.sl.acquire(shared=0):
    return
|
exclsync.acquire()
try:
  exclsync.notifyAll()
finally:
  exclsync.release()
|
exclsync.set()
|
def _AcquireSharedSimple():
  if self.sl.acquire(shared=1, timeout=None):
    self.done.put("shared2")
    self.sl.release()
|
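The rows above replace threading.Condition with threading.Event for test synchronization. The practical difference: a notify on a Condition is lost if no thread is waiting yet, while Event.set() is sticky, so a wait() that starts late still returns. A minimal illustration, independent of the locking test:

```python
# Event.set() is remembered; Condition.notify() is not.
import threading

ev = threading.Event()
ev.set()    # nobody is waiting yet
ev.wait()   # returns immediately: the flag is already set

cond = threading.Condition()
with cond:
  cond.notify_all()  # no waiter -> notification is simply lost
# A later cond.wait() would block until someone notifies again.
```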
""" def __init__(self, request, args, expiry):
|
@ivar sent: the set of contacted peers
@ivar rcvd: the set of peers who replied

"""
def __init__(self, request, args, expiry, sent):
|
def handle_datagram(self, payload, ip, port):
  self.client.HandleResponse(payload, ip, port)
|
self._requests[request.rsalt] = _Request(request, args, expire_time)
|
self._requests[request.rsalt] = _Request(request, args, expire_time, targets)
|
def SendRequest(self, request, args=None, coverage=None, async=True): """Send a confd request to some MCs
|
raise errors.HypervisorError("Can't run lxc-ls: %s" % result.output)
|
raise errors.HypervisorError("Running lxc-ls failed: %s" % result.output)
|
def ListInstances(self): """Get the list of running instances.
|
raise errors.HypervisorError("Can't run lxc-info: %s" % result.output)
|
raise errors.HypervisorError("Running lxc-info failed: %s" % result.output)
|
def GetInstanceInfo(self, instance_name): """Get instance properties.
|
raise HypervisorError("Cannot create instance directory: %s", str(err))
|
raise HypervisorError("Creating instance directory failed: %s", str(err))
|
def StartInstance(self, instance, block_devices): """Start an instance.
|
raise HypervisorError("Can't mount the chroot dir: %s" % result.output)
|
raise HypervisorError("Mounting the root dir of LXC instance %s" " failed: %s" % (instance.name, result.output))
|
def StartInstance(self, instance, block_devices): """Start an instance.
|
raise HypervisorError("Can't run 'poweroff' for the instance: %s" % result.output)
|
raise HypervisorError("Running 'poweroff' on the instance" " failed: %s" % result.output)
|
def StopInstance(self, instance, force=False, retry=False, name=None): """Stop an instance.
|
raise HypervisorError("Can't umount the chroot dir: %s (%s)" %
|
raise HypervisorError("Unmounting the chroot dir failed: %s (%s)" %
|
def StopInstance(self, instance, force=False, retry=False, name=None): """Stop an instance.
|
raise HypervisorError("Migration not supported by the LXC hypervisor")
|
raise HypervisorError("Migration is not supported by the LXC hypervisor")
|
def MigrateInstance(self, instance, target, live): """Migrate an instance.
|
result = self.rpc.call_instance_os_add(inst.primary_node, inst, True, 0)
|
result = self.rpc.call_instance_os_add(inst.primary_node, inst, True, self.op.debug_level)
|
def Exec(self, feedback_fn): """Reinstall the instance.
|
old_name, 0)
|
old_name, self.op.debug_level)
|
def Exec(self, feedback_fn): """Reinstall the instance.
|
result = self.rpc.call_instance_os_add(pnode_name, iobj, False, 0)
|
result = self.rpc.call_instance_os_add(pnode_name, iobj, False, self.op.debug_level)
|
def Exec(self, feedback_fn): """Create and add the instance to the cluster.
|
cluster_name, 0)
|
cluster_name, self.op.debug_level)
|
def Exec(self, feedback_fn): """Create and add the instance to the cluster.
|
idx, 0)
|
idx, self.op.debug_level)
|
def Exec(self, feedback_fn): """Export an instance to an image in the cluster.
|
_WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
|
def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)

  # Don't overwrite existing file
  if not os.path.exists(constants.RAPI_CERT_FILE):
    GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)

  if not os.path.exists(constants.HMAC_CLUSTER_KEY):
    GenerateHmacKey(constants.HMAC_CLUSTER_KEY)

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  # Wait for node daemon to become responsive
  def _CheckNodeDaemon():
    result = rpc.RpcRunner.call_version([master_name])[master_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, 10.0)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon didn't answer queries within"
                             " 10 seconds")
|
|
result = rpc.RpcRunner.call_version([master_name])[master_name]
|
result = rpc.RpcRunner.call_version([node_name])[node_name]
|
def _CheckNodeDaemon():
  result = rpc.RpcRunner.call_version([master_name])[master_name]
  if result.fail_msg:
    raise utils.RetryAgain()
|
raise errors.OpExecError("Node daemon didn't answer queries within" " 10 seconds")
|
raise errors.OpExecError("Node daemon on %s didn't answer queries within" " 10 seconds" % node_name)
|
def _CheckNodeDaemon():
  result = rpc.RpcRunner.call_version([master_name])[master_name]
  if result.fail_msg:
    raise utils.RetryAgain()
|
def DeclareLocks(self, level):
|
def DeclareLocks(self, lu, level):
|
def DeclareLocks(self, level): """Declare locks for this query.
|
def DeclareLocks(self, _):
|
def DeclareLocks(self, lu, level):
|
def DeclareLocks(self, _):
  pass
|
self.assertEquals(compat.TryToRoman(1), "I")
self.assertEquals(compat.TryToRoman(4), "IV")
self.assertEquals(compat.TryToRoman(5), "V")
|
self.assertEquals(compat.TryToRoman(1), 1)
self.assertEquals(compat.TryToRoman(4), 4)
self.assertEquals(compat.TryToRoman(5), 5)
|
def testAFewIntegers(self):
  self.assertEquals(compat.TryToRoman(0), 0)
  self.assertEquals(compat.TryToRoman(1), "I")
  self.assertEquals(compat.TryToRoman(4), "IV")
  self.assertEquals(compat.TryToRoman(5), "V")
|
super(BaseWorker, self).__init__()
|
super(BaseWorker, self).__init__(name=worker_id)
|
def __init__(self, pool, worker_id): """Constructor for BaseWorker thread.
|
if (op == SOCKOP_RECV and
    event & (select.POLLNVAL | select.POLLHUP | select.POLLERR)):
  return ""
|
if event & (select.POLLNVAL | select.POLLHUP | select.POLLERR): break
|
def SocketOperation(sock, op, arg1, timeout):
  """Wrapper around socket functions.

  This function abstracts error handling for socket operations, especially
  for the complicated interaction with OpenSSL.

  @type sock: socket
  @param sock: Socket for the operation
  @type op: int
  @param op: Operation to execute (SOCKOP_* constants)
  @type arg1: any
  @param arg1: Parameter for function (if needed)
  @type timeout: None or float
  @param timeout: Timeout in seconds or None
  @return: Return value of socket function

  """
  # TODO: event_poll/event_check/override
  if op in (SOCKOP_SEND, SOCKOP_HANDSHAKE):
    event_poll = select.POLLOUT
  elif op == SOCKOP_RECV:
    event_poll = select.POLLIN
  elif op == SOCKOP_SHUTDOWN:
    event_poll = None

    # The timeout is only used when OpenSSL requests polling for a condition.
    # It is not advisable to have no timeout for shutdown.
    assert timeout
  else:
    raise AssertionError("Invalid socket operation")

  # Handshake is only supported by SSL sockets
  if (op == SOCKOP_HANDSHAKE and
      not isinstance(sock, OpenSSL.SSL.ConnectionType)):
    return

  # No override by default
  event_override = 0

  while True:
    # Poll only for certain operations and when asked for by an override
    if event_override or op in (SOCKOP_SEND, SOCKOP_RECV, SOCKOP_HANDSHAKE):
      if event_override:
        wait_for_event = event_override
      else:
        wait_for_event = event_poll

      event = WaitForSocketCondition(sock, wait_for_event, timeout)
      if event is None:
        raise HttpSocketTimeout()

      if (op == SOCKOP_RECV and
          event & (select.POLLNVAL | select.POLLHUP | select.POLLERR)):
        return ""

      if not event & wait_for_event:
        continue

    # Reset override
    event_override = 0

    try:
      try:
        if op == SOCKOP_SEND:
          return sock.send(arg1)

        elif op == SOCKOP_RECV:
          return sock.recv(arg1)

        elif op == SOCKOP_SHUTDOWN:
          if isinstance(sock, OpenSSL.SSL.ConnectionType):
            # PyOpenSSL's shutdown() doesn't take arguments
            return sock.shutdown()
          else:
            return sock.shutdown(arg1)

        elif op == SOCKOP_HANDSHAKE:
          return sock.do_handshake()

      except OpenSSL.SSL.WantWriteError:
        # OpenSSL wants to write, poll for POLLOUT
        event_override = select.POLLOUT
        continue

      except OpenSSL.SSL.WantReadError:
        # OpenSSL wants to read, poll for POLLIN
        event_override = select.POLLIN | select.POLLPRI
        continue

      except OpenSSL.SSL.WantX509LookupError:
        continue

      except OpenSSL.SSL.ZeroReturnError, err:
        # SSL Connection has been closed. In SSL 3.0 and TLS 1.0, this only
        # occurs if a closure alert has occurred in the protocol, i.e. the
        # connection has been closed cleanly. Note that this does not
        # necessarily mean that the transport layer (e.g. a socket) has been
        # closed.
        if op == SOCKOP_SEND:
          # Can happen during a renegotiation
          raise HttpConnectionClosed(err.args)
        elif op == SOCKOP_RECV:
          return ""

        # SSL_shutdown shouldn't return SSL_ERROR_ZERO_RETURN
        raise socket.error(err.args)

      except OpenSSL.SSL.SysCallError, err:
        if op == SOCKOP_SEND:
          # arg1 is the data when writing
          if err.args and err.args[0] == -1 and arg1 == "":
            # errors when writing empty strings are expected
            # and can be ignored
            return 0

        if err.args == (-1, _SSL_UNEXPECTED_EOF):
          if op == SOCKOP_RECV:
            return ""
          elif op == SOCKOP_HANDSHAKE:
            # Can happen if peer disconnects directly after the connection is
            # opened.
            raise HttpSessionHandshakeUnexpectedEOF(err.args)

        raise socket.error(err.args)

      except OpenSSL.SSL.Error, err:
        raise socket.error(err.args)

    except socket.error, err:
      if err.args and err.args[0] == errno.EAGAIN:
        # Ignore EAGAIN
        continue

      raise
|
buf = buf[:2]
|
buf = buf[2:]
|
def _ContinueParsing(self, buf, eof): """Main function for HTTP message state machine.
|
self._expire_requests = []
|
def __init__(self, hmac_key, peers, callback, port=None, logger=None): """Constructor for ConfdClient
|
|
while self._expire_requests:
  expire_time, rsalt = self._expire_requests[0]
  if now >= expire_time:
    self._expire_requests.pop(0)
    (request, args) = self._requests[rsalt]
|
for rsalt, rq in self._requests.items():
  if now >= rq.expiry:
|
def ExpireRequests(self): """Delete all the expired requests.
|
orig_request=request, extra_args=args,
|
orig_request=rq.request, extra_args=rq.args,
|
def ExpireRequests(self): """Delete all the expired requests.
|
else:
  break
|
def ExpireRequests(self): """Delete all the expired requests.
|
|
self._requests[request.rsalt] = (request, args)
|
def SendRequest(self, request, args=None, coverage=None, async=True): """Send a confd request to some MCs
|
|
self._expire_requests.append((expire_time, request.rsalt))
|
self._requests[request.rsalt] = _Request(request, args, expire_time)
|
def SendRequest(self, request, args=None, coverage=None, async=True): """Send a confd request to some MCs
|
(request, args) = self._requests[salt]
|
rq = self._requests[salt]
|
def HandleResponse(self, payload, ip, port): """Asynchronous handler for a confd reply
|
orig_request=request,
|
orig_request=rq.request,
|
def HandleResponse(self, payload, ip, port): """Asynchronous handler for a confd reply
|
extra_args=args,
|
extra_args=rq.args,
|
def HandleResponse(self, payload, ip, port): """Asynchronous handler for a confd reply
|
disk_abort = not _WaitForSync(self, instance)
|
disk_abort = not _WaitForSync(self, instance, disks=[disk])
|
def Exec(self, feedback_fn): """Execute disk grow.
|
@ivar lock_status: In-memory locking information for debugging
|
def Serialize(self): """Serializes this _QueuedOpCode.
|
|
"lock_status", "change",
|
def Serialize(self): """Serializes this _QueuedOpCode.
|
|
self.lock_status = None
|
def __init__(self, queue, job_id, ops): """Constructor for the _QueuedJob.
|
|
obj.lock_status = None
|
def Restore(cls, queue, state): """Restore a _QueuedJob from serialized state:
|
|
elif fname == "lock_status": row.append(self.lock_status)
|
def GetInfo(self, fields): """Returns information about a job.
|
|
self._job.lock_status = None
|
def NotifyStart(self): """Mark the opcode as running, not lock-waiting.
|
|
self._job.lock_status = msg
|
def ReportLocks(self, msg): """Write locking information to the job.
|
|
job.lock_status = None
|
def RunTask(self, job): # pylint: disable-msg=W0221 """Job executor.
|
|
if mc_remaining != mc_should:
|
if mc_remaining < mc_should:
|
def CheckPrereq(self): """Check prerequisites.
|
"start_timestamp", "end_timestamp",
|
"start_timestamp", "exec_timestamp", "end_timestamp",
|
def TimeStampNow():
  """Returns the current timestamp.

  @rtype: tuple
  @return: the current time in the (seconds, microseconds) format

  """
  return utils.SplitTime(time.time())
|
request_body = property(fget=lambda self: self._req.private.body_data)
|
def _GetRequestBody(self):
  """Returns the body data.

  """
  return self._req.private.body_data

request_body = property(fget=_GetRequestBody)
|
def __init__(self, items, queryargs, req): """Generic resource constructor.
|
if (existing_node.primary_ip != primary_ip or
    existing_node.secondary_ip != secondary_ip):
|
if existing_node.secondary_ip != secondary_ip:
|
def CheckPrereq(self): """Check prerequisites.
|
elif "size" not in ddict: raise errors.OpPrereqError("Missing size for disk %d" % didx) try: ddict["size"] = utils.ParseUnit(ddict["size"]) except ValueError, err: raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % (didx, err))
|
elif "size" in ddict: if "adopt" in ddict: raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed" " (disk %d)" % didx) try: ddict["size"] = utils.ParseUnit(ddict["size"]) except ValueError, err: raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % (didx, err)) elif "adopt" in ddict: if mode == constants.INSTANCE_IMPORT: raise errors.OpPrereqError("Disk adoption not allowed for instance" " import") ddict["size"] = 0 else: raise errors.OpPrereqError("Missing size or adoption source for" " disk %d" % didx)
|
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    try:
      nic_max = max(int(nidx[0]) + 1 for nidx in opts.nics)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
    nics = [{}] * nic_max
    for nidx, ndict in opts.nics:
      nidx = int(nidx)
      if not isinstance(ndict, dict):
        msg = "Invalid nic/%d value: expected dict, got %s" % (nidx, ndict)
        raise errors.OpPrereqError(msg)
      nics[nidx] = ndict
  elif opts.no_nics:
    # no nics
    nics = []
  else:
    # default of one nic, all auto
    nics = [{}]

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if not opts.disks and not opts.sd_size:
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      opts.disks = [(0, {"size": opts.sd_size})]
    try:
      disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
    except ValueError, err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
    disks = [{}] * disk_max
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" not in ddict:
        raise errors.OpPrereqError("Missing size for disk %d" % didx)
      try:
        ddict["size"] = utils.ParseUnit(ddict["size"])
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                   (didx, err))
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    src_node = None
    src_path = None
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    src_node = opts.src_node
    src_path = opts.src_dir
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                src_node=src_node,
                                src_path=src_path)

  SubmitOrSend(op, opts)
  return 0
|
if not ignore_primary or node != instance.primary_node:
|
if ((node == instance.primary_node and not ignore_primary) or
    (node != instance.primary_node and not result.offline)):
|
def _ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If the ignore_primary is false, errors on the primary node are
  ignored.

  """
  all_result = True
  disks = _ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, top_disk)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if not ignore_primary or node != instance.primary_node:
          all_result = False

  return all_result
|
raise errors.OpExecError("Cannot shutdow instance disks, unable to"
|
raise errors.OpExecError("Cannot shutdown instance disks, unable to"
|
def Exec(self, feedback_fn): """Modifies an instance.
|
def FeedbackFn(ts, log_type, log_msg): """Feedback logging function for http case.
|
def FeedbackFn(msg): """Feedback logging function for jobs.
|
def FeedbackFn(ts, log_type, log_msg): # pylint: disable-msg=W0613
  """Feedback logging function for http case.

  We don't have a stdout for printing log messages, so log them to the
  http log at least.

  @param ts: the timestamp (unused)

  """
  logging.info("%s: %s", log_type, log_msg)
|
@param ts: the timestamp (unused)

"""
|
@param msg: the message

"""
(_, log_type, log_msg) = msg
|
def FeedbackFn(ts, log_type, log_msg): # pylint: disable-msg=W0613
  """Feedback logging function for http case.

  We don't have a stdout for printing log messages, so log them to the
  http log at least.

  @param ts: the timestamp (unused)

  """
  logging.info("%s: %s", log_type, log_msg)
|
result = self.rpc.call_os_get(pnode, self.op.os_type)
result.Raise("OS '%s' not in supported OS list for primary node %s" %
             (self.op.os_type, pnode),
             prereq=True, ecode=errors.ECODE_INVAL)
if not self.op.force_variant:
  _CheckOSVariant(result.payload, self.op.os_type)
|
_CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
|
def CheckPrereq(self): """Check prerequisites.
|
result = self.rpc.call_os_get(pnode.name, self.op.os_type)
result.Raise("OS '%s' not in supported os list for primary node %s" %
             (self.op.os_type, pnode.name),
             prereq=True, ecode=errors.ECODE_INVAL)
if not self.op.force_variant:
  _CheckOSVariant(result.payload, self.op.os_type)
|
_CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
|
def CheckPrereq(self): """Check prerequisites.
|
@raise errors.OpProgrammerError: if the nodes parameter is wrong type
|
@raise errors.ProgrammerError: if the nodes parameter is wrong type
|
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.OpProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
                                 " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = _ExpandNodeName(lu.cfg, name)
    wanted.append(node)
  return utils.NiceSort(wanted)
|
self._queue.acquire(shared=1)
try:
  assert self._op.status in (constants.OP_STATUS_WAITLOCK,
                             constants.OP_STATUS_CANCELING)

  self._job.lock_status = None

  if self._op.status == constants.OP_STATUS_CANCELING:
    raise CancelJob()

  self._op.status = constants.OP_STATUS_RUNNING
  self._op.exec_timestamp = TimeStampNow()
finally:
  self._queue.release()
|
assert self._op.status in (constants.OP_STATUS_WAITLOCK,
                           constants.OP_STATUS_CANCELING)

self._job.lock_status = None

if self._op.status == constants.OP_STATUS_CANCELING:
  raise CancelJob()

self._op.status = constants.OP_STATUS_RUNNING
self._op.exec_timestamp = TimeStampNow()

self._queue.UpdateJobUnlocked(self._job)
|
def NotifyStart(self): """Mark the opcode as running, not lock-waiting.
|
parts.append(",".join(names))
|
parts.append(",".join(sorted(names)))
|
def _ReportLocks(self, level, names, shared, timeout, acquired, result): """Reports lock operations.
|
args = (msg, prereq)
|
args = (msg, ecode)
|
def Raise(self, msg, prereq=False, ecode=None): """If the result has failed, raise an OpExecError.
|
("beparams", None, _TOr(_TDictOf(_TNonEmptyString, _TDict), _TNone)),
|
("beparams", None, _TOr(_TDict, _TNone)),
|
def Exec(self, feedback_fn): """Rename the cluster.
|
filled_hvp = objects.FillDict(cluster.hvparams[instance.hypervisor],
                              instance.hvparams)
|
filled_hvp = cluster.FillHV(instance)
|
def CheckPrereq(self): """Check prerequisites.
|