Diff dataset columns: rem (removed code, 0 to 322k chars) | add (added code, 0 to 2.05M chars) | context (enclosing code, 8 to 228k chars). Rows below follow that order: removed line(s), added line(s), context.
if( type(value) != self._metas[key][0] ) :
if key in self._metaTypes and type(value) != self._metaTypes[key] :
def set_metadata(self, key, value, notify=True):
    """Sets the value of a meta data."""
    if( value == None ): return
    if key not in self._metas :
        raise Exception("This key does not exist : " + key)
self._metas[key][1] = value
valType = self._metas[key][0]

self._metaValues[key] = value
valType = self._metaTypes[key]
if key not in self._metas :
if key not in self._metaTypes :
def get_metadata(self, key):
    """Gets the value of a meta data."""
    if key not in self._metas :
        raise Exception("This key does not exist : " + key)
    return self._metas[key][1]
return self._metas[key][1]
return self._metaValues.get(key)
for k in self._metas.keys():
    valType = self._metas[k][0]
    value = self._metas[k][1]

for k in self._metaValues.keys():
    valType = self._metaTypes[k]
    value = self._metaValues[k]

def simulate_full_data_change(self):
    for k in self._metas.keys():
        valType = self._metas[k][0]
        value = self._metas[k][1]
        self.notify_listeners(("metadata_changed", k, value, valType))
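The rows above are one refactor: a single _metas dict mapping key -> [type, value] is split into parallel _metaTypes and _metaValues dicts. A minimal sketch of the before/after storage layout (class names are illustrative, not from the source):

# Hedged sketch of the refactor visible in the diffs above; only the
# dict layouts come from the source, everything else is assumed.
class MetadataOld(object):
    def __init__(self):
        self._metas = {}  # key -> [type, value]

class MetadataNew(object):
    def __init__(self):
        self._metaTypes = {}   # key -> declared type (may be absent)
        self._metaValues = {}  # key -> current value

    def set_metadata(self, key, value):
        # the type check only applies when a type was declared for the key
        if key in self._metaTypes and type(value) != self._metaTypes[key]:
            raise TypeError("wrong type for " + key)
        self._metaValues[key] = value

    def get_metadata(self, key):
        return self._metaValues.get(key)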
Annotation.extend_ad_hoc_slots("visualStyle", int, None)
def initialise_standard_metadata():
    """Declares the standard keys used by the Node structures.
    Called at the end of this file"""
    #we declare what are the node model ad hoc data we require:
    AbstractNode.extend_ad_hoc_slots("position", list, [0,0], "posx", "posy")
    Node.extend_ad_hoc_slots("userColor", list, None, "user_color")
    Node.extend_ad_hoc_slots("useUserColor", bool, True, "use_user_color", )
    Annotation.extend_ad_hoc_slots("text", str, "", "txt")
return 'IFloat(min=%d, max=%d, step=%d)' % \
return 'IInt(min=%d, max=%d, step=%d)' % \
def __repr__(self):
    default_min = -2**24
    default_max = 2**24
    default_step = 1
    if (self.min == default_min and
        self.max == default_max and
        self.step == default_step):
        return self.__class__.__name__
    else:
        return 'IFloat(min=%d, max=%d, step=%d)' % \
               (self.min, self.max, self.step)
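This row fixes a copy-paste bug: the context shows an integer interface whose __repr__ reported itself as IFloat. A hedged sketch of the corrected behaviour (the class body is assumed; only the string change comes from the diff):

# Illustrative only: default bounds and constructor are assumptions.
class IInt(object):
    def __init__(self, min=-2**24, max=2**24, step=1):
        self.min, self.max, self.step = min, max, step

    def __repr__(self):
        if (self.min, self.max, self.step) == (-2**24, 2**24, 1):
            return self.__class__.__name__
        return 'IInt(min=%d, max=%d, step=%d)' % (self.min, self.max, self.step)

print repr(IInt(min=0, max=10, step=2))   # -> IInt(min=0, max=10, step=2)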
if(isinstance(outlist, tuple) and len(outlist) == 1):
if hasattr(outlist, "__getitem__") and len(outlist) == 1:
def eval(self):
    """
    Evaluate the node by calling __call__
    Return True if the node need a reevaluation
    and a timed delay if the node need a reevaluation at a later time.
    """
    # lazy evaluation
    if self.block and self.get_nb_output() != 0 and self.output(0) is not None:
        return False
    if (self.delay==0 and self.lazy) and not self.modified:
        return False
px = xactor.internal_data.get('posx', 0)
py = yactor.internal_data.get('posx', 0)

px = xactor.get_ad_hoc_dict().get_metadata('position')[0]
py = yactor.get_ad_hoc_dict().get_metadata('position')[0]

def cmp_posx(x, y):
    """todo"""
    (xpid, xvid, xactor) = x
    (ypid, yvid, yactor) = y
    px = xactor.internal_data.get('posx', 0)
    py = yactor.internal_data.get('posx', 0)
    ret = cmp(px, py)
    if (not ret):
        ret = cmp(xpid, ypid)  # reverse order
    return ret
res = pkgman.search_node("command")
res = pkgman.search_node("sum")
def test_search():
    pkgman = PackageManager()
    pkgman.load_directory("./")
    assert 'Test' in pkgman
    res = pkgman.search_node("command")
    print res
    assert "command" in res[0].name
    # comment these 3 lines because system.command is not part
    # of any nodes anymore.
    #res = pkgman.search_node("system.command")
    #print res
    #assert "command" in res[0].name
assert "command" in res[0].name
assert "sum" in res[0].name
self.op.secondary_ip]
self.op.secondary_ip, self.op.ndparams]
def CheckArguments(self):
  self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
  all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
              self.op.master_capable, self.op.vm_capable,
              self.op.secondary_ip]
  if all_mods.count(None) == len(all_mods):
    raise errors.OpPrereqError("Please pass at least one modification",
                               errors.ECODE_INVAL)
  if all_mods.count(True) > 1:
    raise errors.OpPrereqError("Can't set the node into more than one"
                               " state at the same time",
                               errors.ECODE_INVAL)
cmd = ['gnt-node', 'add', "--no-ssh-key-check"]
cmd = ['gnt-node', 'add']
def _NodeAdd(node, readd=False):
  master = qa_config.GetMasterNode()
  if not readd and node.get('_added', False):
    raise qa_error.Error("Node %s already in cluster" % node['primary'])
  elif readd and not node.get('_added', False):
    raise qa_error.Error("Node %s not yet in cluster" % node['primary'])
  cmd = ['gnt-node', 'add', "--no-ssh-key-check"]
  if node.get('secondary', None):
    cmd.append('--secondary-ip=%s' % node['secondary'])
  if readd:
    cmd.append('--readd')
  cmd.append(node['primary'])
  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd)).wait(), 0)
  node['_added'] = True
self.sock = httplib.FakeSocket(sock, ssl)
if self._SUPPORT_FAKESOCKET:
  self.sock = httplib.FakeSocket(sock, ssl)
else:
  self.sock = _SslSocketWrapper(ssl)


class _SslSocketWrapper(object):
  def __init__(self, sock):
    """Initializes this class.

    """
    self._sock = sock

  def __getattr__(self, name):
    """Forward everything to underlying socket.

    """
    return getattr(self._sock, name)

  def makefile(self, mode, bufsize):
    """Fake makefile method.

    makefile() on normal file descriptors uses dup2(2), which doesn't work with
    SSL sockets and therefore is not implemented by pyOpenSSL. This fake method
    works with the httplib module, but might not work for other modules.

    """
    return socket._fileobject(self._sock, mode, bufsize)

def connect(self):
  """Connect to the server specified when the object was created.
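The _SslSocketWrapper added above is an instance of a small general pattern: delegate every attribute lookup to a wrapped object via __getattr__ and override only the methods that misbehave. A self-contained sketch of the pattern (class and method names invented, not Ganeti's):

# Hedged delegation-wrapper sketch; LoggingSocket is a made-up name.
class LoggingSocket(object):
  def __init__(self, inner):
    self._inner = inner

  def __getattr__(self, name):
    # Only called for attributes not found on the wrapper itself,
    # so the close() override below still takes precedence.
    return getattr(self._inner, name)

  def close(self):
    print "closing %r" % self._inner
    self._inner.close()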
("name", _NoDefault, _TNonEmptyString),
("name", _NoDefault, _TMaybeString),
def CheckPrereq(self):
  """Check prerequisites.
("name", _NoDefault, _TNonEmptyString),
("name", _NoDefault, _TMaybeString),
def Exec(self, feedback_fn):
  """Returns the tag list.
("name", _NoDefault, _TNonEmptyString),
("name", _NoDefault, _TMaybeString),
def Exec(self, feedback_fn):
  """Sets the tag.
class AsyncUDPSocket(asyncore.dispatcher):
class GanetiBaseAsyncoreDispatcher(asyncore.dispatcher):
  """Base Ganeti Asyncore Dispacher

  """
  def handle_error(self):
    """Log an error in handling any request, and proceed.

    """
    logging.exception("Error while handling asyncore request")

  def writable(self):
    """Most of the time we don't want to check for writability.

    """
    return False


class AsyncUDPSocket(GanetiBaseAsyncoreDispatcher):

def __init__(self, timefunc):
  sched.scheduler.__init__(self, timefunc, AsyncoreDelayFunction)
asyncore.dispatcher.__init__(self)
GanetiBaseAsyncoreDispatcher.__init__(self)
def __init__(self):
  """Constructor for AsyncUDPSocket
def handle_error(self):
  """Log an error in handling any request, and proceed.

  """
  logging.exception("Error while handling asyncore request")

def handle_write(self):
  if not self._out_queue:
    logging.error("handle_write called with empty output queue")
    return
  (ip, port, payload) = self._out_queue[0]
  utils.IgnoreSignals(self.sendto, payload, 0, (ip, port))
  self._out_queue.pop(0)
"REMOVE_INSTANCE": int(self.remove_instance),
"REMOVE_INSTANCE": str(bool(self.remove_instance)),
def BuildHooksEnv(self):
  """Build hooks env.
if all_mods.count(None) == 3:
if all_mods.count(None) == len(all_mods):
def CheckArguments(self):
  self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
  all_mods = [self.op.offline, self.op.master_candidate, self.op.drained]
  if all_mods.count(None) == 3:
    raise errors.OpPrereqError("Please pass at least one modification",
                               errors.ECODE_INVAL)
  if all_mods.count(True) > 1:
    raise errors.OpPrereqError("Can't set the node into more than one"
                               " state at the same time",
                               errors.ECODE_INVAL)

self.offline_or_drain = (self.op.offline == True or
                         self.op.drained == True)
self.deoffline_or_drain = (self.op.offline == False or
                           self.op.drained == False)
self.offline_or_drain)
self.op.offline == True or self.op.drained == True)
                                 errors.ECODE_INVAL)
if (self.op.master_candidate == True and
    ((node.offline and not self.op.offline == False) or
     (node.drained and not self.op.drained == False))):
  raise errors.OpPrereqError("Node '%s' is offline or drained, can't set"
                             " to master_candidate" % node.name,
                             errors.ECODE_INVAL)

                                 errors.ECODE_STATE)
self.old_flags = old_flags = (node.master_candidate,
                              node.drained, node.offline)
assert old_flags in self._F2R, "Un-handled old flags %s" % str(old_flags)
self.old_role = self._F2R[old_flags]

for attr in self._FLAGS:
  if (getattr(self.op, attr) == False and getattr(node, attr) == False):
    self.LogInfo("Ignoring request to unset flag %s, already unset", attr)
    setattr(self.op, attr, None)

def CheckPrereq(self):
  """Check prerequisites.
if (self.deoffline_or_drain and not self.offline_or_drain and
    not self.op.master_candidate == True and not node.master_candidate):
  self.op.master_candidate = _DecideSelfPromotion(self)
  if self.op.master_candidate:
    self.LogInfo("Autopromoting node to master candidate")

return

if self.op.drained == False or self.op.offline == False:
  if _DecideSelfPromotion(self):
    self.op.master_candidate = True
    self.LogInfo("Auto-promoting node to master candidate")
changed_mc = False
if self.op.offline is not None:
  node.offline = self.op.offline
  result.append(("offline", str(self.op.offline)))
  if self.op.offline == True:
    if node.master_candidate:
      node.master_candidate = False
      changed_mc = True
      result.append(("master_candidate", "auto-demotion due to offline"))
    if node.drained:
      node.drained = False
      result.append(("drained", "clear drained status due to offline"))

if self.op.master_candidate is not None:
  node.master_candidate = self.op.master_candidate
  changed_mc = True
  result.append(("master_candidate", str(self.op.master_candidate)))
  if self.op.master_candidate == False:
    rrc = self.rpc.call_node_demote_from_mc(node.name)
    msg = rrc.fail_msg
    if msg:
      self.LogWarning("Node failed to demote itself: %s" % msg)

if self.op.drained is not None:
  node.drained = self.op.drained
  result.append(("drained", str(self.op.drained)))
  if self.op.drained == True:
    if node.master_candidate:
      node.master_candidate = False
      changed_mc = True
      result.append(("master_candidate", "auto-demotion due to drain"))
      rrc = self.rpc.call_node_demote_from_mc(node.name)
      msg = rrc.fail_msg
      if msg:
        self.LogWarning("Node failed to demote itself: %s" % msg)
    if node.offline:
      node.offline = False
      result.append(("offline", "clear offline status due to drain"))

changed_mc = [old_role, new_role].count(self._ROLE_CANDIDATE) == 1

if (old_role == self._ROLE_CANDIDATE and
    new_role != self._ROLE_OFFLINE and new_role != old_role):
  msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
  if msg:
    self.LogWarning("Node failed to demote itself: %s", msg)

new_flags = self._R2F[new_role]
for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
  if of != nf:
    result.append((desc, str(nf)))
(node.master_candidate, node.drained, node.offline) = new_flags

def Exec(self, feedback_fn):
  """Modifies a node.
def LogWarning(self, msg):
def LogWarning(self, msg, *args, **kwargs):
def LogWarning(self, msg):
  pass
def LogInfo(self, msg):
def LogInfo(self, msg, *args, **kwargs):
def LogInfo(self, msg):
  pass
_TDictOf(_TElemOf(["mac", "ip", "bridge"]), _TNonEmptyString)))),
_TDictOf(_TElemOf(["mac", "ip", "bridge"]), _TOr(_TNone, _TNonEmptyString))))),
def _ValidateResult(self):
  """Process the allocator results.
(node.name, node.master_candidate, node.drain,
(node.name, node.master_candidate, node.drained,
def _UnlockedVerifyConfig(self):
  """Verify function.
self.op.disks)
self.op.disks, False)
def ExpandNames(self):
  self._ExpandAndLockInstance()
self.op.iallocator, self.op.remote_node, [])
self.op.iallocator, self.op.remote_node, [], True)
def ExpandNames(self):
  self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name)
  if self.op.node_name is None:
    raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name,
                               errors.ECODE_NOENT)
disks):
disks, delay_iallocator):
def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
             disks):
  """Initializes this class.
"parameters", "api_versions")
"parameters", "api_versions", _HID, _BLK)
def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(lu, child, node, on_primary)

  return result
req_size = _ComputeDiskSize(self.op.disk_template, self.disks)
if req_size is not None and not self.adopt_disks:
  _CheckNodesFreeDisk(self, nodenames, req_size)

if self.adopt_disks:

if not self.adopt_disks:
  req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
  _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
else:

def CheckPrereq(self):
  """Check prerequisites.
_CheckNodesFreeDisk(self, nodenames, self.op.amount)
_CheckNodesFreeDiskPerVG(self, nodenames, {self.disk.physical_id[0]: self.op.amount})
disks = [{"size": d.size} for d in instance.disks]
required = _ComputeDiskSize(self.op.disk_template, disks)
_CheckNodesFreeDisk(self, [self.op.remote_node], required)

disks = [{"size": d.size, "vg": d.vg} for d in instance.disks]
required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
_CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
return env, nl, nl
nl_post = list(self.instance.all_nodes) + nl
return env, nl, nl_post

def BuildHooksEnv(self):
  """Build hooks env.
return env, nl, nl
nl_post = list(nl)
nl_post.append(source_node)
return env, nl, nl_post
nl = [
  self.cfg.GetMasterNode(),
  self.instance.primary_node,
  ]
nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
if up.server_reply.answer != self._answers[salt].answer:
if (up.server_reply.serial == self._answers[salt].serial and up.server_reply.answer != self._answers[salt].answer):
def _HandleReply(self, up):
  """Handle a single confd reply, and decide whether to filter it.

else:
  names = sorted(names)

def acquire(self, names, timeout=None, shared=0, test_notify=None):
  """Acquire a set of resource locks.
for lname in utils.UniqueSequence(names):
for lname in sorted(utils.UniqueSequence(names)):
def __acquire_inner(self, names, want_all, shared, timeout_fn, test_notify):
  """Inner logic for acquiring a number of locks.
SSH_CONFIG_DIR = "/etc/ssh/"
SSH_HOST_DSA_PRIV = SSH_CONFIG_DIR + "ssh_host_dsa_key"

SSH_CONFIG_DIR = _autoconf.SSH_CONFIG_DIR
SSH_HOST_DSA_PRIV = SSH_CONFIG_DIR + "/ssh_host_dsa_key"

def SplitVersion(version):
  """Splits version number stored in an int.

  Returns:
    tuple; (major, minor, revision)

  """
  assert isinstance(version, int)

  (major, remainder) = divmod(version, 1000000)
  (minor, revision) = divmod(remainder, 10000)

  return (major, minor, revision)
SSH_HOST_RSA_PRIV = SSH_CONFIG_DIR + "ssh_host_rsa_key"
SSH_HOST_RSA_PRIV = SSH_CONFIG_DIR + "/ssh_host_rsa_key"
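SplitVersion above unpacks a version stored as major * 1000000 + minor * 10000 + revision. A quick worked example; BuildVersion here is written out only to show the assumed inverse encoding:

# BuildVersion below illustrates the assumed packing; not quoted from source.
def BuildVersion(major, minor, revision):
  return 1000000 * major + 10000 * minor + revision

v = BuildVersion(2, 4, 1)   # 2 * 1000000 + 4 * 10000 + 1 = 2040001
print SplitVersion(v)       # -> (2, 4, 1)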
mycommand = ("%s start %s -b '%s'" % (constants.DAEMON_UTIL, constants.NODED, bind_address))
mycommand = ("%s stop-all; %s start %s -b '%s'" % (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED, bind_address))
def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  sshrunner = ssh.SshRunner(cluster_name,
                            ipv6=family==netutils.IP6Address.family)

  noded_cert = utils.ReadFile(constants.NODED_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
  confd_hmac_key = utils.ReadFile(constants.CONFD_HMAC_KEY)

  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
  # so the same restrictions apply.
  for content in (noded_cert, rapi_cert, confd_hmac_key):
    if re.search('^!EOF\.', content, re.MULTILINE):
      raise errors.OpExecError("invalid SSL certificate or HMAC key")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"
  if not confd_hmac_key.endswith("\n"):
    confd_hmac_key += "\n"

  bind_address = constants.IP4_ADDRESS_ANY
  if family == netutils.IP6Address.family:
    bind_address = constants.IP6_ADDRESS_ANY

  # set up inter-node password and certificate and restarts the node daemon
  # and then connect with ssh to set password and start ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
  mycommand = ("%s start %s -b '%s'" %
               (constants.DAEMON_UTIL, constants.NODED, bind_address))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  _WaitForNodeDaemon(node)
feedback_fn("* Verifying oprhan instances")
feedback_fn("* Verifying orphan instances")
def Exec(self, feedback_fn):
  """Verify integrity of cluster, performing various test on nodes.
if name not in state:
if name not in state and hasattr(self, name):
def __setstate__(self, state):
  """Generic unserializer.
m = _ANS1_TIME_REGEX.match(value)
m = _ASN1_TIME_REGEX.match(value)
def _ParseAsn1Generalizedtime(value):
  """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.

  @type value: string
  @param value: ASN1 GENERALIZEDTIME timestamp

  """
  m = _ANS1_TIME_REGEX.match(value)
  if m:
    # We have an offset
    asn1time = m.group(1)
    hours = int(m.group(2))
    minutes = int(m.group(3))
    utcoffset = (60 * hours) + minutes
  else:
    if not value.endswith("Z"):
      raise ValueError("Missing timezone")
    asn1time = value[:-1]
    utcoffset = 0

  parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")

  tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)

  return calendar.timegm(tt.utctimetuple())
wanted = []
for name in nodes:
  node = _ExpandNodeName(lu.cfg, name)
  wanted.append(node)
wanted = [_ExpandNodeName(lu.cfg, name) for name in nodes]
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nodes: list
  @param nodes: list of node names or None for all nodes
  @rtype: list
  @return: the list of nodes, sorted
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'",
                               errors.ECODE_INVAL)

  if not nodes:
    raise errors.ProgrammerError("_GetWantedNodes should only be called with a"
      " non-empty list of nodes whose name is to be expanded.")

  wanted = []
  for name in nodes:
    node = _ExpandNodeName(lu.cfg, name)
    wanted.append(node)

  return utils.NiceSort(wanted)
except socket.timeout, err:
  raise TimeoutError("Receive timeout: %s" % str(err))

def Recv(self):
  """Try to receive a message from the socket.
cmd = ['gnt-instance', 'modify', '-t', 'drbd', '-n', snode, instance['name']]
cmd = ['gnt-instance', 'modify', '-t', 'drbd', '-n', snode['primary'], instance['name']]
def TestInstanceConvertDisk(instance, snode):
  """gnt-instance modify -t"""
  master = qa_config.GetMasterNode()
  cmd = ['gnt-instance', 'modify', '-t', 'plain', instance['name']]
  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd)).wait(), 0)
  cmd = ['gnt-instance', 'modify', '-t', 'drbd', '-n', snode,
         instance['name']]
  AssertEqual(StartSSH(master['primary'],
                       utils.ShellQuoteArgs(cmd)).wait(), 0)
def GetOnlineNodes(nodes, cl=None, nowarn=False):
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False, filter_master=False):
def GetOnlineNodes(nodes, cl=None, nowarn=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the note is
      not displayed

  """
  if cl is None:
    cl = GetClient()

  result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
                         use_locking=False)
  offline = [row[0] for row in result if row[1]]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
  return [row[0] for row in result if not row[1]]
result = cl.QueryNodes(names=nodes, fields=["name", "offline"],
if secondary_ips:
  name_idx = 2
else:
  name_idx = 0

if filter_master:
  master_node = cl.QueryConfigValues(["master_node"])[0]
  filter_fn = lambda x: x != master_node
else:
  filter_fn = lambda _: True

result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
return [row[0] for row in result if not row[1]]
return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
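The rows above extend GetOnlineNodes so callers can ask for secondary IPs and filter out the master node. A hedged usage sketch (the client object and call sites are assumed, not from the source):

# Hypothetical call sites for the extended signature.
online = GetOnlineNodes([], cl=cl)                       # names of online nodes
sips = GetOnlineNodes([], cl=cl, secondary_ips=True)     # their secondary IPs
workers = GetOnlineNodes([], cl=cl, filter_master=True)  # online nodes minus master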
from the cluster defaults
from the node group defaults
def SimpleFillND(self, ndparams):
  """Fill a given ndparams dict with defaults.

if not netutils.TcpPing(target, port, live_port_needed=True):
  raise errors.HypervisorError("Remote host %s not listening on port"
                               " %s, cannot migrate" % (target, port))

def MigrateInstance(self, instance, target, live):
  """Migrate an instance to a target node.
kvm_cmd.extend(['-usbdevice', 'tablet'])
def _GenerateKVMRuntime(self, instance, block_devices):
  """Generate KVM information to start an instance.
exp_nic_count = int(export_info.get(constants.INISECT_INS, 'nic_count'))
try:
  exp_nic_count = export_info.getint(constants.INISECT_INS, 'nic_count')
except (TypeError, ValueError), err:
  raise errors.OpPrereqError("Invalid export file, nic_count is not"
                             " an integer: %s" % str(err),
                             errors.ECODE_STATE)

def CheckPrereq(self):
  """Check prerequisites.
@type filename = str
@param filename = path to write certificate to

@type filename: str
@param filename: path to write certificate to

def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN,
                              validity=constants.X509_CERT_DEFAULT_VALIDITY):
  """Legacy function to generate self-signed X509 certificate.

  @type filename = str
  @param filename = path to write certificate to
  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: validity of certificate in number of days

  """
  # TODO: Investigate using the cluster name instead of X505_CERT_CN for
  # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI
  # and node daemon certificates have the proper Subject/Issuer.
  (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
                                                   validity * 24 * 60 * 60)

  WriteFile(filename, mode=0400, data=key_pem + cert_pem)
("dry_run", False, _TBool),
def _CheckOSParams(lu, required, nodenames, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the hypervisor we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  result = lu.rpc.call_os_validate(required, nodenames, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" % node)
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, node)
if res.offline:
if res.offline or msg:
def HooksCallBack(self, phase, hooks_results, feedback_fn, lu_result):
  """Analyze the post-hooks' result
if new_info[1] != ini_info[1] or new_info[5] < ini_info[5]:
if (new_info is not None and (new_info[1] != ini_info[1] or new_info[5] < ini_info[5])):
def _CheckInstance():
  new_info = self.GetInstanceInfo(instance.name)
def GetPVInfo(vg_names, filter_allocatable=True):
def _GetVolumeInfo(lvm_cmd, fields):
  """Returns LVM Volumen infos using lvm_cmd

  @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
  @param fields: Fields to return
  @return: A list of dicts each with the parsed fields

  """
  if not fields:
    raise errors.ProgrammerError("No fields specified")

  sep = "|"
  cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
         "--separator=%s" % sep, "-o%s" % ",".join(fields)]

  result = utils.RunCmd(cmd)
  if result.failed:
    raise errors.CommandError("Can't get the volume information: %s - %s" %
                              (result.fail_reason, result.output))

  data = []
  for line in result.stdout.splitlines():
    splitted_fields = line.strip().split(sep)

    if len(fields) != len(splitted_fields):
      raise errors.CommandError("Can't parse %s output: line '%s'" %
                                (lvm_cmd, line))

    data.append(splitted_fields)

  return data

@classmethod
def GetPVInfo(cls, vg_names, filter_allocatable=True):

def GetPVInfo(vg_names, filter_allocatable=True):
  """Get the free space info for PVs in a volume group.
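_GetVolumeInfo standardizes one parsing idea: run an LVM reporting command with --noheadings and a fixed --separator, split each line on that separator, and reject lines with the wrong field count. A standalone sketch of just that parsing step on canned output (the sample lines are fabricated):

# Parsing sketch only; the sample output is invented, not real pvs output.
sample = " /dev/sda2|xenvg|3938.00|a-\n /dev/sdb1|xenvg|3997.00|a-\n"
fields = ["pv_name", "vg_name", "pv_free", "pv_attr"]
sep = "|"

data = []
for line in sample.splitlines():
  parts = line.strip().split(sep)
  if len(parts) != len(fields):
    raise ValueError("Can't parse line %r" % line)
  data.append(parts)

print data[0]   # -> ['/dev/sda2', 'xenvg', '3938.00', 'a-']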
sep = "|"
command = ["pvs", "--noheadings", "--nosuffix", "--units=m",
           "-opv_name,vg_name,pv_free,pv_attr", "--unbuffered",
           "--separator=%s" % sep ]
result = utils.RunCmd(command)
if result.failed:
  logging.error("Can't get the PV information: %s - %s",
                result.fail_reason, result.output)

try:
  info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
                                    "pv_attr"])
except errors.GenericError, err:
  logging.error("Can't get PV information: %s", err)
for line in result.stdout.splitlines():
  fields = line.strip().split(sep)
  if len(fields) != 4:
    logging.error("Can't parse pvs output: line '%s'", line)
    return None
for pv_name, vg_name, pv_free, pv_attr in info:
if filter_allocatable and fields[3][0] != 'a':
if filter_allocatable and pv_attr[0] != "a":
if vg_names and fields[1] not in vg_names:
if vg_names and vg_name not in vg_names:
data.append((float(fields[2]), fields[0], fields[1]))
data.append((float(pv_free), pv_name, vg_name))

return data

@classmethod
def GetVGInfo(cls, vg_names, filter_readonly=True):
  """Get the free space info for specific VGs.

  @param vg_names: list of volume group names, if empty all will be returned
  @param filter_readonly: whether to skip over readonly VGs

  @rtype: list
  @return: list of tuples (free_space, name) with free_space in mebibytes

  """
  try:
    info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr"])
  except errors.GenericError, err:
    logging.error("Can't get VG information: %s", err)
    return None

  data = []
  for vg_name, vg_free, vg_attr in info:
    if filter_readonly and vg_attr[0] == "r":
      continue
    if vg_names and vg_name not in vg_names:
      continue
    data.append((float(vg_free), vg_name))
pvs_info = self.GetPVInfo([self._vg_name])
if not pvs_info:
  _ThrowError("Can't compute PV info for vg %s", self._vg_name)
pvs_info.sort()
pvs_info.reverse()

free_size, _, _ = pvs_info[0]

vg_info = self.GetVGInfo([self._vg_name])
if not vg_info:
  _ThrowError("Can't compute VG info for vg %s", self._vg_name)
free_size, _ = vg_info[0]

def Snapshot(self, size):
  """Create a snapshot copy of an lvm block device.
self.rapi.AddResponse("[ { \"id\": \"123\", \"uri\": \"\/2\/jobs\/123\" }," " { \"id\": \"124\", \"uri\": \"\2\/jobs\/124\" } ]")
self.rapi.AddResponse('[ { "id": "123", "uri": "\\/2\\/jobs\\/123" },' ' { "id": "124", "uri": "\\/2\\/jobs\\/124" } ]')
def testGetJobs(self):
  self.rapi.AddResponse("[ { \"id\": \"123\", \"uri\": \"\/2\/jobs\/123\" },"
                        " { \"id\": \"124\", \"uri\": \"\2\/jobs\/124\" } ]")
  self.assertEqual([123, 124], self.client.GetJobs())
  self.assertHandler(rlib2.R_2_jobs)
parser = OptionParser(option_list=parser_opts + [DEBUG_OPT],
parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + [DEBUG_OPT],
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
ntime_diff = abs(nvinfo_starttime - ntime_merged)
ntime_diff = "%.01fs" % abs(nvinfo_starttime - ntime_merged)
def Exec(self, feedback_fn):
  """Verify integrity of cluster, performing various test on nodes.
ntime_diff = abs(ntime_merged - nvinfo_endtime)
ntime_diff = "%.01fs" % abs(ntime_merged - nvinfo_endtime)
"Node time diverges by at least %0.1fs from master node time",
"Node time diverges by at least %s from master node time",
op = opcodes.OpDeActivateInstanceDisks(instance_name=instance_name)
op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)
def PUT(self):
  """Deactivate disks for an instance.
if '(' in dev:
  return dev.split('(')[0]

  return dev.split('(')[0]

def handle_dev(dev):
  return [parse_dev(x) for x in dev.split(",")]

def map_line(line):
  line = [v.strip() for v in line]
  return [{'name': line[0], 'size': line[1],
           'dev': dev, 'vg': line[3]} for dev in handle_dev(line[2])]

all_devs = []
for line in result.stdout.splitlines():
  if line.count('|') >= 3:
    all_devs.extend(map_line(line.split('|')))

def parse_dev(dev):
  if '(' in dev:
    return dev.split('(')[0]
  else:
    return dev
  return dev

def map_line(line):
  return {
    'name': line[0].strip(),
    'size': line[1].strip(),
    'dev': parse_dev(line[2].strip()),
    'vg': line[3].strip(),
  }

return [map_line(line.split('|'))
        for line in result.stdout.splitlines()
        if line.count('|') >= 3]

    logging.warning("Strange line in the output from lvs: '%s'", line)

return all_devs
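parse_dev strips the segment suffix that lvs can append to a device name. A tiny worked example (sample strings invented):

# Invented samples, shown only to illustrate parse_dev's behaviour.
print parse_dev('/dev/md0(0)')   # -> /dev/md0
print parse_dev('/dev/md0')      # -> /dev/md0 (no '(' present, unchanged)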
utils.WriteFile(destination, data=txt, gid=getents.confd_gid, mode=0640)
try:
  fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
                           close=False, gid=getents.confd_gid, mode=0640)
except errors.LockError:
  raise errors.ConfigurationError("The configuration file has been"
                                  " modified since the last write, cannot"
                                  " update")
try:
  self._cfg_id = utils.GetFileID(fd=fd)
finally:
  os.close(fd)

def _WriteConfig(self, destination=None, feedback_fn=None):
  """Write the configuration data to persistent storage.
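The row above moves the config writer to optimistic concurrency: remember an identifier of the file as last read, refuse the write if the on-disk file no longer matches it, and record the new identifier afterwards. A generic sketch of that idea using (device, inode, mtime) as the identifier; this is not Ganeti's SafeWriteFile, and a real implementation must make the check-and-write atomic:

import os

# Hedged sketch; the race between the check and the write is ignored here.
def file_id(path):
  st = os.stat(path)
  return (st.st_dev, st.st_ino, st.st_mtime)

def safe_write(path, expected_id, data):
  if file_id(path) != expected_id:
    raise RuntimeError("file changed since last read, refusing to update")
  f = open(path, "w")
  try:
    f.write(data)
  finally:
    f.close()
  return file_id(path)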
_OP_REQP = ["instance_name", "disks", "disk_template",
_OP_REQP = ["instance_name", "disks",
def _CheckHVParams(lu, nodenames, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstract the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type nodenames: list
  @param nodenames: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  hvinfo = lu.rpc.call_hypervisor_validate_params(nodenames,
                                                  hvname,
                                                  hvparams)
  for node in nodenames:
    info = hvinfo[node]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" % node)
for attr in ["pnode", "snode", "iallocator", "hypervisor"]:
for attr in ["pnode", "snode", "iallocator", "hypervisor", "disk_template"]:
def CheckArguments(self):
  """Check arguments.

                             errors.ECODE_INVAL)
if (self.op.disk_template == constants.DT_FILE and
    not constants.ENABLE_FILE_STORAGE):
  raise errors.OpPrereqError("File storage disabled at configure time",
_CheckDiskTemplate(self.op.disk_template)
hvp = instance.hvparams
conf_hvp = instance.hvparams
def _ExecuteKVMRuntime(self, instance, kvm_runtime, incoming=None):
  """Execute a KVM cmd, after completing it with some last minute data

kvm_cmd, kvm_nics, hvparams = kvm_runtime

security_model = hvp[constants.HV_SECURITY_MODEL]

kvm_cmd, kvm_nics, up_hvp = kvm_runtime
up_hvp = objects.FillDict(conf_hvp, up_hvp)

security_model = conf_hvp[constants.HV_SECURITY_MODEL]
kvm_cmd.extend(["-runas", hvp[constants.HV_SECURITY_DOMAIN]])
kvm_cmd.extend(["-runas", conf_hvp[constants.HV_SECURITY_DOMAIN]])
nic_type = hvparams[constants.HV_NIC_TYPE]
nic_type = up_hvp[constants.HV_NIC_TYPE]
if hvparams[constants.HV_VHOST_NET]:
if up_hvp[constants.HV_VHOST_NET]:
vnc_pwd_file = hvp[constants.HV_VNC_PASSWORD_FILE]
vnc_pwd_file = conf_hvp[constants.HV_VNC_PASSWORD_FILE]
if hvp[constants.HV_KVM_USE_CHROOT]:
if conf_hvp[constants.HV_KVM_USE_CHROOT]:
cmd = "trap '' TERM; read < %s" % self.fifo_file
cmd = ["/bin/sh", "-c", "trap '' TERM; read < %s" % self.fifo_file]
def testTimeoutKill(self):
  cmd = "trap '' TERM; read < %s" % self.fifo_file
  timeout = 0.2
  strcmd = utils.ShellQuoteArgs(["/bin/sh", "-c", cmd])
  out, err, status, ta = utils._RunCmdPipe(strcmd, {}, True, "/", False,
                                           timeout, _linger_timeout=0.2)
  self.assert_(status < 0)
  self.assertEqual(-status, signal.SIGKILL)

strcmd = utils.ShellQuoteArgs(["/bin/sh", "-c", cmd])
out, err, status, ta = utils._RunCmdPipe(strcmd, {}, True, "/", False,
out, err, status, ta = utils._RunCmdPipe(cmd, {}, False, "/", False,
op.result = errors.EncodeException(err)
to_encode = err
def RunTask(self, job): # pylint: disable-msg=W0221
  """Job executor.
op.result = str(err)
to_encode = errors.OpExecError(str(err))
op.result = errors.EncodeException(to_encode)
filled_hvp = objects.FillDict(cluster.hvparams[self.op.hypervisor],
filled_hvp = objects.FillDict(cluster.GetHVDefaults(self.op.hypervisor, self.op.os_type),
def CheckPrereq(self):
  """Check prerequisites.
self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
"not enough memory on to accommodate" " failovers should peer node %s fail", prinode)
"not enough memory to accomodate instance failovers" " should node %s fail", prinode)
def _VerifyNPlusOneMemory(self, node_image, instance_cfg):
  """Verify N+1 Memory Resilience.
logging.Filehandler.handleError(self, record)
logging.FileHandler.handleError(self, record)
def handleError(self, record):
  """Handle errors which occur during an emit() call.
raise errors.OpPrereqError("Remote imports require names to be checked" %
raise errors.OpPrereqError("Remote imports require names to be checked",
def CheckArguments(self):
  """Check arguments.