Columns: rem (string, 0–322k chars) · add (string, 0–2.05M chars) · context (string, 8–228k chars)
try: self.callback(False) except: logging.error("Unexpected exception", exc_info=True)
self.callback(False)
def process_IN_IGNORED(self, event): # Since we monitor a single file rather than the directory it resides in, # when that file is replaced with another one (which is what happens when # utils.WriteFile, the most normal way of updating files in ganeti, is # called) we're going to receive an IN_IGNORED event from inotify, because # of the file removal (which is contextual with the replacement). In such a # case we'll need to create a watcher for the "new" file. This can be done # by the callback by calling "enable" again on us. logging.debug("Received 'ignored' inotify event for %s", event.path) self.watch_handle = None
try: self.callback(True) except: logging.error("Unexpected exception", exc_info=True)
self.callback(True)
def process_IN_MODIFY(self, event): # This gets called when the monitored file is modified. Note that this # doesn't usually happen in Ganeti, as most of the time we're just # replacing any file with a new one, at filesystem level, rather than # actually changing it. (see utils.WriteFile) logging.debug("Received 'modify' inotify event for %s", event.path)
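Note: the rows above come from Ganeti's inotify handling, wrapping the callback so one misbehaving callback cannot kill the event loop. A minimal sketch of the pattern the comments describe; the pyinotify wiring here is an assumption for illustration, not Ganeti's exact class:

import logging
import pyinotify

class SingleFileHandler(pyinotify.ProcessEvent):
  def my_init(self, watch_manager=None, path=None, callback=None):
    # pyinotify passes __init__ keyword arguments through to my_init()
    self.wm = watch_manager
    self.path = path
    self.callback = callback
    self.watch_handle = None

  def enable(self):
    if self.watch_handle is None:
      mask = pyinotify.IN_MODIFY | pyinotify.IN_IGNORED
      self.watch_handle = self.wm.add_watch(self.path, mask)

  def process_IN_IGNORED(self, event):
    # The watched file was replaced, so this watch is dead; the callback
    # is expected to call enable() again to watch the new file.
    self.watch_handle = None
    try:
      self.callback(False)
    except: # pylint: disable-msg=W0702
      logging.error("Unexpected exception", exc_info=True)

  def process_IN_MODIFY(self, event):
    try:
      self.callback(True)
    except: # pylint: disable-msg=W0702
      logging.error("Unexpected exception", exc_info=True)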
instance_os = self.op.os_name
instance_os = self.op.os_type
def CheckPrereq(self): """Check prerequisites.
vif_data.append("'%s'" % nic_str)
def _WriteConfigFile(cls, instance, block_devices): """Write the Xen config file for the instance.
vif_data.append("'%s'" % nic_str)
def _WriteConfigFile(cls, instance, block_devices): """Create a Xen 3.1 HVM config file.
raise CertificateError("SSL issue: %r" % err)
raise CertificateError("SSL issue: %s (%r)" % (err, err))
def _SendRequest(self, method, path, query, content): """Sends an HTTP request.
expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name) if expanded_name is None: raise errors.OpPrereqError("Instance '%s' not known" % self.op.instance_name, errors.ECODE_NOENT) self.needed_locks[locking.LEVEL_INSTANCE] = expanded_name self.op.instance_name = expanded_name
self.op.instance_name = _ExpandInstanceName(self.cfg, self.op.instance_name) self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
def _ExpandAndLockInstance(self): """Helper function to expand and lock an instance.
node = lu.cfg.ExpandNodeName(name) if node is None: raise errors.OpPrereqError("No such node name '%s'" % name, errors.ECODE_NOENT)
node = _ExpandNodeName(lu.cfg, name)
def _GetWantedNodes(lu, nodes): """Returns list of checked and expanded node names. @type lu: L{LogicalUnit} @param lu: the logical unit on whose behalf we execute @type nodes: list @param nodes: list of node names or None for all nodes @rtype: list @return: the list of nodes, sorted @raise errors.OpProgrammerError: if the nodes parameter is wrong type """ if not isinstance(nodes, list): raise errors.OpPrereqError("Invalid argument type 'nodes'", errors.ECODE_INVAL) if not nodes: raise errors.ProgrammerError("_GetWantedNodes should only be called with a" " non-empty list of nodes whose name is to be expanded.") wanted = [] for name in nodes: node = lu.cfg.ExpandNodeName(name) if node is None: raise errors.OpPrereqError("No such node name '%s'" % name, errors.ECODE_NOENT) wanted.append(node) return utils.NiceSort(wanted)
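Note: most rows in this section apply one refactoring, replacing the repeated expand-then-check-for-None pattern with shared _ExpandNodeName/_ExpandInstanceName helpers. Inferred from the call sites, the helpers are presumably shaped like this (a sketch, using Ganeti's errors module as the surrounding rows do; the exact implementation may differ):

def _ExpandItemName(fn, name, kind):
  """Expand an item name via a config lookup function, failing if unknown."""
  expanded_name = fn(name)
  if expanded_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return expanded_name

def _ExpandNodeName(cfg, name):
  return _ExpandItemName(cfg.ExpandNodeName, name, "Node")

def _ExpandInstanceName(cfg, name):
  return _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")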
wanted = [] for name in instances: instance = lu.cfg.ExpandInstanceName(name) if instance is None: raise errors.OpPrereqError("No such instance name '%s'" % name, errors.ECODE_NOENT) wanted.append(instance)
wanted = [_ExpandInstanceName(lu.cfg, name) for name in instances]
def _GetWantedInstances(lu, instances): """Returns list of checked and expanded instance names. @type lu: L{LogicalUnit} @param lu: the logical unit on whose behalf we execute @type instances: list @param instances: list of instance names or None for all instances @rtype: list @return: the list of instances, sorted @raise errors.OpPrereqError: if the instances parameter is wrong type @raise errors.OpPrereqError: if any of the passed instances is not found """ if not isinstance(instances, list): raise errors.OpPrereqError("Invalid argument type 'instances'", errors.ECODE_INVAL) if instances: wanted = [] for name in instances: instance = lu.cfg.ExpandInstanceName(name) if instance is None: raise errors.OpPrereqError("No such instance name '%s'" % name, errors.ECODE_NOENT) wanted.append(instance) else: wanted = utils.NiceSort(lu.cfg.GetInstanceList()) return wanted
full_name = self.cfg.ExpandInstanceName(name) if full_name is None: raise errors.OpPrereqError("Instance '%s' not known" % name, errors.ECODE_NOENT)
full_name = _ExpandInstanceName(self.cfg, name)
def ExpandNames(self): if not isinstance(self.op.instances, list): raise errors.OpPrereqError("Invalid argument type 'instances'", errors.ECODE_INVAL)
node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name)) if node is None: raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name, errors.ECODE_NOENT)
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) node = self.cfg.GetNodeInfo(self.op.node_name) assert node is not None
def CheckPrereq(self): """Check prerequisites.
node_name = self.cfg.ExpandNodeName(self.op.node_name) if node_name is None: raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, errors.ECODE_NOENT) self.op.node_name = node_name
self.opnode_name = _ExpandNodeName(self.cfg, self.op.node_name)
def CheckArguments(self): node_name = self.cfg.ExpandNodeName(self.op.node_name) if node_name is None: raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, errors.ECODE_NOENT)
node_name = self.cfg.ExpandNodeName(self.op.node_name) if node_name is None: raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, errors.ECODE_INVAL) self.op.node_name = node_name
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
def CheckArguments(self): node_name = self.cfg.ExpandNodeName(self.op.node_name) if node_name is None: raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, errors.ECODE_INVAL) self.op.node_name = node_name _CheckBooleanOpField(self.op, 'master_candidate') _CheckBooleanOpField(self.op, 'offline') _CheckBooleanOpField(self.op, 'drained') all_mods = [self.op.offline, self.op.master_candidate, self.op.drained] if all_mods.count(None) == 3: raise errors.OpPrereqError("Please pass at least one modification", errors.ECODE_INVAL) if all_mods.count(True) > 1: raise errors.OpPrereqError("Can't set the node into more than one" " state at the same time", errors.ECODE_INVAL)
node_name = self.cfg.ExpandNodeName(self.op.node_name) if node_name is None: raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, errors.ECODE_NOENT) self.op.node_name = node_name if node_name == self.cfg.GetMasterNode() and not self.op.force:
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name) if self.op.node_name == self.cfg.GetMasterNode() and not self.op.force:
def CheckArguments(self): node_name = self.cfg.ExpandNodeName(self.op.node_name) if node_name is None: raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, errors.ECODE_NOENT) self.op.node_name = node_name if node_name == self.cfg.GetMasterNode() and not self.op.force: raise errors.OpPrereqError("The node is the master and the force" " parameter was not set", errors.ECODE_INVAL)
pnode = self.cfg.GetNodeInfo( self.cfg.ExpandNodeName(instance.primary_node)) if pnode is None: raise errors.OpPrereqError("Primary node '%s' is unknown" % self.op.pnode, errors.ECODE_NOENT) result = self.rpc.call_os_get(pnode.name, self.op.os_type)
pnode = _ExpandNodeName(self.cfg, instance.primary_node) result = self.rpc.call_os_get(pnode, self.op.os_type)
def CheckPrereq(self): """Check prerequisites.
(self.op.os_type, pnode.name),
(self.op.os_type, pnode),
def CheckPrereq(self): """Check prerequisites.
instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.op.instance_name)) if instance is None: raise errors.OpPrereqError("Instance '%s' not known" % self.op.instance_name, errors.ECODE_NOENT)
self.op.instance_name = _ExpandInstanceName(self.cfg, self.op.instance_name) instance = self.cfg.GetInstanceInfo(self.op.instance_name) assert instance is not None
def CheckPrereq(self): """Check prerequisites.
target_node = self.cfg.ExpandNodeName(self.op.target_node) if target_node is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.target_node, errors.ECODE_NOENT)
target_node = _ExpandNodeName(self.cfg, self.op.target_node)
def ExpandNames(self): self._ExpandAndLockInstance() target_node = self.cfg.ExpandNodeName(self.op.target_node) if target_node is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.target_node, errors.ECODE_NOENT) self.op.target_node = target_node self.needed_locks[locking.LEVEL_NODE] = [target_node] self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name) if self.op.node_name is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name, errors.ECODE_NOENT)
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
def ExpandNames(self): self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name) if self.op.node_name is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name, errors.ECODE_NOENT)
instance = self.cfg.GetInstanceInfo( self.cfg.ExpandInstanceName(self.instance_name)) if instance is None: raise errors.OpPrereqError("Instance '%s' not known" % self.instance_name, errors.ECODE_NOENT)
instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name) instance = self.cfg.GetInstanceInfo(instance_name) assert instance is not None
def CheckPrereq(self): """Check prerequisites.
def _ExpandNode(self, node): """Expands and checks one node name. """ node_full = self.cfg.ExpandNodeName(node) if node_full is None: raise errors.OpPrereqError("Unknown node %s" % node, errors.ECODE_NOENT) return node_full
def _ExpandNode(self, node): """Expands and checks one node name.
self.op.pnode = self._ExpandNode(self.op.pnode)
self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
def ExpandNames(self): """ExpandNames for CreateInstance.
self.op.snode = self._ExpandNode(self.op.snode)
self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
def ExpandNames(self): """ExpandNames for CreateInstance.
self.op.src_node = src_node = self._ExpandNode(src_node)
self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
def ExpandNames(self): """ExpandNames for CreateInstance.
remote_node = self.cfg.ExpandNodeName(self.op.remote_node) if remote_node is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node, errors.ECODE_NOENT)
remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
def ExpandNames(self): self._ExpandAndLockInstance()
remote_node = self.cfg.ExpandNodeName(self.op.remote_node) if remote_node is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node, errors.ECODE_NOENT) self.op.remote_node = remote_node
self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
def ExpandNames(self): self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name) if self.op.node_name is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name, errors.ECODE_NOENT)
self.needed_locks[locking.LEVEL_NODE] = [remote_node]
self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
def ExpandNames(self): self.op.node_name = self.cfg.ExpandNodeName(self.op.node_name) if self.op.node_name is None: raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name, errors.ECODE_NOENT)
node_name = self.cfg.ExpandNodeName(self.op.node_name) if node_name is None: raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, errors.ECODE_NOENT) self.op.node_name = node_name
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
def CheckArguments(self): node_name = self.cfg.ExpandNodeName(self.op.node_name) if node_name is None: raise errors.OpPrereqError("Invalid node name '%s'" % self.op.node_name, errors.ECODE_NOENT)
full_name = self.cfg.ExpandInstanceName(name) if full_name is None: raise errors.OpPrereqError("Instance '%s' not known" % name, errors.ECODE_NOENT)
full_name = _ExpandInstanceName(self.cfg, name)
def ExpandNames(self): self.needed_locks = {} self.share_locks = dict.fromkeys(locking.LEVELS, 1)
self.dst_node = self.cfg.GetNodeInfo( self.cfg.ExpandNodeName(self.op.target_node)) if self.dst_node is None: raise errors.OpPrereqError("Wrong node name %s" % self.op.target_node, errors.ECODE_NOENT)
self.op.target_node = _ExpandNodeName(self.cfg, self.op.target_node) self.dst_node = self.cfg.GetNodeInfo(self.op.target_node) assert self.dst_node is not None
def CheckPrereq(self): """Check prerequisites.
name = self.cfg.ExpandNodeName(self.op.name) if name is None: raise errors.OpPrereqError("Invalid node name (%s)" % (self.op.name,), errors.ECODE_NOENT) self.op.name = name self.needed_locks[locking.LEVEL_NODE] = name
self.op.name = _ExpandNodeName(self.cfg, self.op.name) self.needed_locks[locking.LEVEL_NODE] = self.op.name
def ExpandNames(self): self.needed_locks = {} if self.op.kind == constants.TAG_NODE: name = self.cfg.ExpandNodeName(self.op.name) if name is None: raise errors.OpPrereqError("Invalid node name (%s)" % (self.op.name,), errors.ECODE_NOENT) self.op.name = name self.needed_locks[locking.LEVEL_NODE] = name elif self.op.kind == constants.TAG_INSTANCE: name = self.cfg.ExpandInstanceName(self.op.name) if name is None: raise errors.OpPrereqError("Invalid instance name (%s)" % (self.op.name,), errors.ECODE_NOENT) self.op.name = name self.needed_locks[locking.LEVEL_INSTANCE] = name
name = self.cfg.ExpandInstanceName(self.op.name) if name is None: raise errors.OpPrereqError("Invalid instance name (%s)" % (self.op.name,), errors.ECODE_NOENT) self.op.name = name self.needed_locks[locking.LEVEL_INSTANCE] = name
self.op.name = _ExpandInstanceName(self.cfg, self.op.name) self.needed_locks[locking.LEVEL_INSTANCE] = self.op.name
def ExpandNames(self): self.needed_locks = {} if self.op.kind == constants.TAG_NODE: name = self.cfg.ExpandNodeName(self.op.name) if name is None: raise errors.OpPrereqError("Invalid node name (%s)" % (self.op.name,), errors.ECODE_NOENT) self.op.name = name self.needed_locks[locking.LEVEL_NODE] = name elif self.op.kind == constants.TAG_INSTANCE: name = self.cfg.ExpandInstanceName(self.op.name) if name is None: raise errors.OpPrereqError("Invalid instance name (%s)" % (self.op.name,), errors.ECODE_NOENT) self.op.name = name self.needed_locks[locking.LEVEL_INSTANCE] = name
fname = self.cfg.ExpandInstanceName(self.op.name) if fname is None: raise errors.OpPrereqError("Instance '%s' not found for relocation" % self.op.name, errors.ECODE_NOENT)
fname = _ExpandInstanceName(self.cfg, self.op.name)
def CheckPrereq(self): """Check prerequisites.
return None
def _TryReadUidFile(cls, uid_file): """Try to read a uid file
return None return uid
return None
def _TryReadUidFile(cls, uid_file): """Try to read a uid file
uuid=utils.NewUUID(),
def InitCluster(cluster_name, mac_prefix, master_netdev, file_storage_dir, candidate_pool_size, secondary_ip=None, vg_name=None, beparams=None, nicparams=None, hvparams=None, enabled_hypervisors=None, modify_etc_hosts=True, modify_ssh_setup=True, maintain_node_health=False, drbd_helper=None, uid_pool=None, default_iallocator=None, primary_ip_version=None): """Initialise the cluster. @type candidate_pool_size: int @param candidate_pool_size: master candidate pool size """ # TODO: complete the docstring if config.ConfigWriter.IsCluster(): raise errors.OpPrereqError("Cluster is already initialised", errors.ECODE_STATE) if not enabled_hypervisors: raise errors.OpPrereqError("Enabled hypervisors list must contain at" " least one member", errors.ECODE_INVAL) invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES if invalid_hvs: raise errors.OpPrereqError("Enabled hypervisors contains invalid" " entries: %s" % invalid_hvs, errors.ECODE_INVAL) ipcls = None if primary_ip_version == constants.IP4_VERSION: ipcls = netutils.IP4Address elif primary_ip_version == constants.IP6_VERSION: ipcls = netutils.IP6Address else: raise errors.OpPrereqError("Invalid primary ip version: %d." % primary_ip_version) hostname = netutils.GetHostname(family=ipcls.family) if not ipcls.IsValid(hostname.ip): raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d" " address." % (hostname.ip, primary_ip_version)) if ipcls.IsLoopback(hostname.ip): raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback" " address. Please fix DNS or %s." % (hostname.ip, constants.ETC_HOSTS), errors.ECODE_ENVIRON) if not ipcls.Own(hostname.ip): raise errors.OpPrereqError("Inconsistency: this host's name resolves" " to %s,\nbut this ip address does not" " belong to this host" % hostname.ip, errors.ECODE_ENVIRON) clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family) if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5): raise errors.OpPrereqError("Cluster IP already active", errors.ECODE_NOTUNIQUE) if not secondary_ip: if primary_ip_version == constants.IP6_VERSION: raise errors.OpPrereqError("When using a IPv6 primary address, a valid" " IPv4 address must be given as secondary", errors.ECODE_INVAL) secondary_ip = hostname.ip if not netutils.IP4Address.IsValid(secondary_ip): raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid" " IPv4 address." % secondary_ip, errors.ECODE_INVAL) if not netutils.IP4Address.Own(secondary_ip): raise errors.OpPrereqError("You gave %s as secondary IP," " but it does not belong to this host." 
% secondary_ip, errors.ECODE_ENVIRON) if vg_name is not None: # Check if volume group is valid vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name, constants.MIN_VG_SIZE) if vgstatus: raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if" " you are not using lvm" % vgstatus, errors.ECODE_INVAL) if drbd_helper is not None: try: curr_helper = bdev.BaseDRBD.GetUsermodeHelper() except errors.BlockDeviceError, err: raise errors.OpPrereqError("Error while checking drbd helper" " (specify --no-drbd-storage if you are not" " using drbd): %s" % str(err), errors.ECODE_ENVIRON) if drbd_helper != curr_helper: raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s" " is the current helper" % (drbd_helper, curr_helper), errors.ECODE_INVAL) file_storage_dir = _InitFileStorage(file_storage_dir) if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix): raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix, errors.ECODE_INVAL) result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev]) if result.failed: raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" % (master_netdev, result.output.strip()), errors.ECODE_INVAL) dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)] utils.EnsureDirs(dirs) utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES) utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES) objects.NIC.CheckParameterSyntax(nicparams) # hvparams is a mapping of hypervisor->hvparams dict for hv_name, hv_params in hvparams.iteritems(): utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES) hv_class = hypervisor.GetHypervisor(hv_name) hv_class.CheckParameterSyntax(hv_params) # set up ssh config and /etc/hosts sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB) sshkey = sshline.split(" ")[1] if modify_etc_hosts: utils.AddHostToEtcHosts(hostname.name, hostname.ip) if modify_ssh_setup: _InitSSHSetup() if default_iallocator is not None: alloc_script = utils.FindFile(default_iallocator, constants.IALLOCATOR_SEARCH_PATH, os.path.isfile) if alloc_script is None: raise errors.OpPrereqError("Invalid default iallocator script '%s'" " specified" % default_iallocator, errors.ECODE_INVAL) now = time.time() # init of cluster config file cluster_config = objects.Cluster( serial_no=1, rsahostkeypub=sshkey, highest_used_port=(constants.FIRST_DRBD_PORT - 1), mac_prefix=mac_prefix, volume_group_name=vg_name, tcpudp_port_pool=set(), master_node=hostname.name, master_ip=clustername.ip, master_netdev=master_netdev, cluster_name=clustername.name, file_storage_dir=file_storage_dir, enabled_hypervisors=enabled_hypervisors, beparams={constants.PP_DEFAULT: beparams}, nicparams={constants.PP_DEFAULT: nicparams}, hvparams=hvparams, candidate_pool_size=candidate_pool_size, modify_etc_hosts=modify_etc_hosts, modify_ssh_setup=modify_ssh_setup, uid_pool=uid_pool, ctime=now, mtime=now, uuid=utils.NewUUID(), maintain_node_health=maintain_node_health, drbd_usermode_helper=drbd_helper, default_iallocator=default_iallocator, primary_ip_family=ipcls.family, ) master_node_config = objects.Node(name=hostname.name, primary_ip=hostname.ip, secondary_ip=secondary_ip, serial_no=1, master_candidate=True, offline=False, drained=False, ) InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config) cfg = config.ConfigWriter(offline=True) ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE) cfg.Update(cfg.GetClusterInfo(), logging.error) backend.WriteSsconfFiles(cfg.GetSsconfValues()) # set up the 
inter-node password and certificate _InitGanetiServerSetup(hostname.name) # start the master ip # TODO: Review rpc call from bootstrap # TODO: Warn on failed start master rpc.RpcRunner.call_node_start_master(hostname.name, True, False)
default_nodegroup = objects.NodeGroup( uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID), name="default", members=[master_node_config.name], ) nodegroups = { default_nodegroup.uuid: default_nodegroup, }
def InitConfig(version, cluster_config, master_node_config, cfg_file=constants.CLUSTER_CONF_FILE): """Create the initial cluster configuration. It will contain the current node, which will also be the master node, and no instances. @type version: int @param version: configuration version @type cluster_config: L{objects.Cluster} @param cluster_config: cluster configuration @type master_node_config: L{objects.Node} @param master_node_config: master node configuration @type cfg_file: string @param cfg_file: configuration file path """ nodes = { master_node_config.name: master_node_config, } now = time.time() config_data = objects.ConfigData(version=version, cluster=cluster_config, nodes=nodes, instances={}, serial_no=1, ctime=now, mtime=now) utils.WriteFile(cfg_file, data=serializer.Dump(config_data.ToDict()), mode=0600)
The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid} @type name: str @param name: the daemon name to use @param pid: if passed, will be used instead of getpid() @raise errors.GenericError: if the pid file already exists and
@type pidfile: string @param pidfile: the path to the file to be written @raise errors.LockError: if the pid file already exists and
def WritePidFile(pidfile): """Write the current process pidfile. The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid} @type name: str @param name: the daemon name to use @param pid: if passed, will be used instead of getpid() @raise errors.GenericError: if the pid file already exists and points to a live process """ # We don't rename nor truncate the file to not drop locks under # existing processes fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600) # Lock the PID file (and fail if not possible to do so). Any code # wanting to send a signal to the daemon should try to lock the PID # file before reading it. If acquiring the lock succeeds, the daemon is # no longer running and the signal should not be sent. LockFile(fd_pidfile) os.write(fd_pidfile, "%d\n" % os.getpid()) return fd_pidfile
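Note: the comment in the new WritePidFile only gives the writer's half of the protocol. A counterpart sketch of the reader's side, under the assumption that LockFile is a non-blocking flock wrapper: trying to lock the pidfile tells you whether the daemon still holds it.

import errno
import fcntl
import os

def _PidfileLocked(pidfile):
  """Return True if some process still holds the lock on the pidfile."""
  fd = os.open(pidfile, os.O_RDONLY)
  try:
    try:
      fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
    except IOError, err:
      if err.errno in (errno.EAGAIN, errno.EACCES):
        return True   # still locked -> daemon alive, safe to signal
      raise
    return False      # lock acquired -> holder is gone, pid is stale
  finally:
    os.close(fd)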
raise errors.OpPrereqError("Either all disks have are adoped or none is",
raise errors.OpPrereqError("Either all disks are adopted or none is",
def CheckArguments(self): """Check arguments.
def ExpandNames(self): """ExpandNames for CreateInstance. Figure out the right locks for instance creation. """ self.needed_locks = {}
def ExpandNames(self): """ExpandNames for CreateInstance.
if self.op.name_check: hostname1 = utils.GetHostInfo(self.op.instance_name) self.op.instance_name = instance_name = hostname1.name self.check_ip = hostname1.ip else: instance_name = self.op.instance_name self.check_ip = None
instance_name = self.op.instance_name
def ExpandNames(self): """ExpandNames for CreateInstance.
nic_ip = hostname1.ip
nic_ip = self.hostname1.ip
def ExpandNames(self): """ExpandNames for CreateInstance.
if (self.op.file_driver and not self.op.file_driver in constants.FILE_DRIVER): raise errors.OpPrereqError("Invalid file driver name '%s'" % self.op.file_driver, errors.ECODE_INVAL) if self.op.file_storage_dir and os.path.isabs(self.op.file_storage_dir): raise errors.OpPrereqError("File storage directory path not absolute", errors.ECODE_INVAL) if [self.op.iallocator, self.op.pnode].count(None) != 1: raise errors.OpPrereqError("One and only one of iallocator and primary" " node must be given", errors.ECODE_INVAL)
def ExpandNames(self): """ExpandNames for CreateInstance.
self.op.force_variant = True if self.op.no_install: self.LogInfo("No-installation mode has no effect during import") else: if getattr(self.op, "os_type", None) is None: raise errors.OpPrereqError("No guest OS specified", errors.ECODE_INVAL) self.op.force_variant = getattr(self.op, "force_variant", False)
def ExpandNames(self): """ExpandNames for CreateInstance.
logging.debug("Deferring task %r, new priority %s", defer.priority)
logging.debug("Deferring task %r, new priority %s", args, defer.priority)
def run(self): """Main thread function.
@type beparam: dict @param beparam: the dict to fill
@type beparams: dict @param beparams: the dict to fill
def SimpleFillBE(self, beparams): """Fill a given beparams dict with cluster defaults.
@type nicparam: dict @param nicparam: the dict to fill
@type nicparams: dict @param nicparams: the dict to fill
def SimpleFillNIC(self, nicparams): """Fill a given nicparams dict with cluster defaults.
def OSCoreEnv(inst_os, os_params, debug=0):
def OSCoreEnv(os_name, inst_os, os_params, debug=0):
def OSCoreEnv(inst_os, os_params, debug=0): """Calculate the basic environment for an os script. @type inst_os: L{objects.OS} @param inst_os: operating system for which the environment is being built @type os_params: dict @param os_params: the OS parameters @type debug: integer @param debug: debug level (0 or 1, for OS Api 10) @rtype: dict @return: dict of environment variables @raise errors.BlockDeviceError: if the block device cannot be found """ result = {} api_version = \ max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions)) result['OS_API_VERSION'] = '%d' % api_version result['OS_NAME'] = inst_os.name result['DEBUG_LEVEL'] = '%d' % debug # OS variants if api_version >= constants.OS_API_V15: try: variant = inst_os.name.split('+', 1)[1] except IndexError: variant = inst_os.supported_variants[0] result['OS_VARIANT'] = variant # OS params for pname, pvalue in os_params.items(): result['OSP_%s' % pname.upper()] = pvalue return result
variant = inst_os.name.split('+', 1)[1]
variant = os_name.split('+', 1)[1]
def OSCoreEnv(inst_os, os_params, debug=0): """Calculate the basic environment for an os script. @type inst_os: L{objects.OS} @param inst_os: operating system for which the environment is being built @type os_params: dict @param os_params: the OS parameters @type debug: integer @param debug: debug level (0 or 1, for OS Api 10) @rtype: dict @return: dict of environment variables @raise errors.BlockDeviceError: if the block device cannot be found """ result = {} api_version = \ max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions)) result['OS_API_VERSION'] = '%d' % api_version result['OS_NAME'] = inst_os.name result['DEBUG_LEVEL'] = '%d' % debug # OS variants if api_version >= constants.OS_API_V15: try: variant = inst_os.name.split('+', 1)[1] except IndexError: variant = inst_os.supported_variants[0] result['OS_VARIANT'] = variant # OS params for pname, pvalue in os_params.items(): result['OSP_%s' % pname.upper()] = pvalue return result
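Note: the subtlety behind these OSCoreEnv rows is that the OS name as requested may carry a "+variant" suffix that the on-disk OS object's name lacks, so splitting inst_os.name silently drops the requested variant. With illustrative names:

os_name = "debootstrap+secure"  # name as passed in, variant included
inst_os_name = "debootstrap"    # inst_os.name as loaded from disk

assert os_name.split("+", 1)[1] == "secure"   # new code: correct variant
try:
  inst_os_name.split("+", 1)[1]               # old code path
except IndexError:
  pass  # fell back to supported_variants[0], ignoring the request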
result = OSCoreEnv(inst_os, instance.osparams, debug=debug)
result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)
def OSEnvironment(instance, inst_os, debug=0): """Calculate the environment for an os script. @type instance: L{objects.Instance} @param instance: target instance for the os script run @type inst_os: L{objects.OS} @param inst_os: operating system for which the environment is being built @type debug: integer @param debug: debug level (0 or 1, for OS Api 10) @rtype: dict @return: dict of environment variables @raise errors.BlockDeviceError: if the block device cannot be found """ result = OSCoreEnv(inst_os, instance.osparams, debug=debug) result['INSTANCE_NAME'] = instance.name result['INSTANCE_OS'] = instance.os result['HYPERVISOR'] = instance.hypervisor result['DISK_COUNT'] = '%d' % len(instance.disks) result['NIC_COUNT'] = '%d' % len(instance.nics) # Disks for idx, disk in enumerate(instance.disks): real_disk = _OpenRealBD(disk) result['DISK_%d_PATH' % idx] = real_disk.dev_path result['DISK_%d_ACCESS' % idx] = disk.mode if constants.HV_DISK_TYPE in instance.hvparams: result['DISK_%d_FRONTEND_TYPE' % idx] = \ instance.hvparams[constants.HV_DISK_TYPE] if disk.dev_type in constants.LDS_BLOCK: result['DISK_%d_BACKEND_TYPE' % idx] = 'block' elif disk.dev_type == constants.LD_FILE: result['DISK_%d_BACKEND_TYPE' % idx] = \ 'file:%s' % disk.physical_id[0] # NICs for idx, nic in enumerate(instance.nics): result['NIC_%d_MAC' % idx] = nic.mac if nic.ip: result['NIC_%d_IP' % idx] = nic.ip result['NIC_%d_MODE' % idx] = nic.nicparams[constants.NIC_MODE] if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED: result['NIC_%d_BRIDGE' % idx] = nic.nicparams[constants.NIC_LINK] if nic.nicparams[constants.NIC_LINK]: result['NIC_%d_LINK' % idx] = nic.nicparams[constants.NIC_LINK] if constants.HV_NIC_TYPE in instance.hvparams: result['NIC_%d_FRONTEND_TYPE' % idx] = \ instance.hvparams[constants.HV_NIC_TYPE] # HV/BE params for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]: for key, value in source.items(): result["INSTANCE_%s_%s" % (kind, key)] = str(value) return result
validate_env = OSCoreEnv(tbv, osparams)
validate_env = OSCoreEnv(osname, tbv, osparams)
def ValidateOS(required, osname, checks, osparams): """Validate the given OS' parameters. @type required: boolean @param required: whether absence of the OS should translate into failure or not @type osname: string @param osname: the OS to be validated @type checks: list @param checks: list of the checks to run (currently only 'parameters') @type osparams: dict @param osparams: dictionary with OS parameters @rtype: boolean @return: True if the validation passed, or False if the OS was not found and L{required} was false """ if not constants.OS_VALIDATE_CALLS.issuperset(checks): _Fail("Unknown checks required for OS %s: %s", osname, set(checks).difference(constants.OS_VALIDATE_CALLS)) name_only = osname.split("+", 1)[0] status, tbv = _TryOSFromDisk(name_only, None) if not status: if required: _Fail(tbv) else: return False if max(tbv.api_versions) < constants.OS_API_V20: return True if constants.OS_VALIDATE_PARAMETERS in checks: _CheckOSPList(tbv, osparams.keys()) validate_env = OSCoreEnv(tbv, osparams) result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env, cwd=tbv.path) if result.failed: logging.error("os validate command '%s' returned error: %s output: %s", result.cmd, result.fail_reason, result.output) _Fail("OS validation script failed (%s), output: %s", result.fail_reason, result.output, log=False) return True
return utils.ReadFile("%s/man/%s.sgml" %
return utils.ReadFile("%s/man/%s.rst" %
def _ReadManFile(name): return utils.ReadFile("%s/man/%s.sgml" % (testutils.GetSourceDir(), name))
pattern = "<cmdsynopsis>\s*<command>%s</command>" % re.escape(cmd) if not re.findall(pattern, mantext, re.S):
pattern = r"^(\| )?\*\*%s\*\*" % re.escape(cmd) if not re.findall(pattern, mantext, re.DOTALL | re.MULTILINE):
def _CheckManpage(self, script, mantext, commands): missing = []
shutil.rmtree(finaldestdir, True)
shutil.rmtree(finaldestdir, ignore_errors=True)
def FinalizeExport(instance, snap_disks): """Write out the export configuration information. @type instance: L{objects.Instance} @param instance: the instance which we export, used for saving configuration @type snap_disks: list of L{objects.Disk} @param snap_disks: list of snapshot block devices, which will be used to get the actual name of the dump file @rtype: None """ destdir = utils.PathJoin(constants.EXPORT_DIR, instance.name + ".new") finaldestdir = utils.PathJoin(constants.EXPORT_DIR, instance.name) config = objects.SerializableConfigParser() config.add_section(constants.INISECT_EXP) config.set(constants.INISECT_EXP, 'version', '0') config.set(constants.INISECT_EXP, 'timestamp', '%d' % int(time.time())) config.set(constants.INISECT_EXP, 'source', instance.primary_node) config.set(constants.INISECT_EXP, 'os', instance.os) config.set(constants.INISECT_EXP, 'compression', 'gzip') config.add_section(constants.INISECT_INS) config.set(constants.INISECT_INS, 'name', instance.name) config.set(constants.INISECT_INS, 'memory', '%d' % instance.beparams[constants.BE_MEMORY]) config.set(constants.INISECT_INS, 'vcpus', '%d' % instance.beparams[constants.BE_VCPUS]) config.set(constants.INISECT_INS, 'disk_template', instance.disk_template) nic_total = 0 for nic_count, nic in enumerate(instance.nics): nic_total += 1 config.set(constants.INISECT_INS, 'nic%d_mac' % nic_count, '%s' % nic.mac) config.set(constants.INISECT_INS, 'nic%d_ip' % nic_count, '%s' % nic.ip) config.set(constants.INISECT_INS, 'nic%d_bridge' % nic_count, '%s' % nic.bridge) # TODO: redundant: on load can read nics until it doesn't exist config.set(constants.INISECT_INS, 'nic_count' , '%d' % nic_total) disk_total = 0 for disk_count, disk in enumerate(snap_disks): if disk: disk_total += 1 config.set(constants.INISECT_INS, 'disk%d_ivname' % disk_count, ('%s' % disk.iv_name)) config.set(constants.INISECT_INS, 'disk%d_dump' % disk_count, ('%s' % disk.physical_id[1])) config.set(constants.INISECT_INS, 'disk%d_size' % disk_count, ('%d' % disk.size)) config.set(constants.INISECT_INS, 'disk_count' , '%d' % disk_total) utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE), data=config.Dumps()) shutil.rmtree(finaldestdir, True) shutil.move(destdir, finaldestdir)
if (not os.path.commonprefix([file_storage_dir, base_file_storage_dir]) ==
if (os.path.commonprefix([file_storage_dir, base_file_storage_dir]) !=
def _TransformFileStorageDir(file_storage_dir): """Checks whether given file_storage_dir is valid. Checks whether the given file_storage_dir is within the cluster-wide default file_storage_dir stored in SimpleStore. Only paths under that directory are allowed. @type file_storage_dir: str @param file_storage_dir: the path to check @return: the normalized path if valid, None otherwise """ if not constants.ENABLE_FILE_STORAGE: _Fail("File storage disabled at configure time") cfg = _GetConfig() file_storage_dir = os.path.normpath(file_storage_dir) base_file_storage_dir = cfg.GetFileStorageDir() if (not os.path.commonprefix([file_storage_dir, base_file_storage_dir]) == base_file_storage_dir): _Fail("File storage directory '%s' is not under base file" " storage directory '%s'", file_storage_dir, base_file_storage_dir) return file_storage_dir
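Note: the rewritten condition reads better but behaves the same, since "not a == b" already parses as "not (a == b)". Worth noting either way: os.path.commonprefix compares character-wise, not per path component, so the check accepts sibling directories that merely share a string prefix:

import os.path

base = "/srv/storage"
assert os.path.commonprefix(["/srv/storage/inst1", base]) == base  # accepted
assert os.path.commonprefix(["/etc/passwd", base]) != base         # rejected
# Caveat: character-wise matching means a sibling path also passes:
assert os.path.commonprefix(["/srv/storage2/x", base]) == base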
feedback_fn("OS %s already in %s, ignoring", val, desc)
feedback_fn("OS %s already in %s, ignoring" % (val, desc))
def helper_os(aname, mods, desc): desc += " OS list" lst = getattr(self.cluster, aname) for key, val in mods: if key == constants.DDM_ADD: if val in lst: feedback_fn("OS %s already in %s, ignoring", val, desc) else: lst.append(val) elif key == constants.DDM_REMOVE: if val in lst: lst.remove(val) else: feedback_fn("OS %s not found in %s, ignoring", val, desc) else: raise errors.ProgrammerError("Invalid modification '%s'" % key)
feedback_fn("OS %s not found in %s, ignoring", val, desc)
feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
def helper_os(aname, mods, desc): desc += " OS list" lst = getattr(self.cluster, aname) for key, val in mods: if key == constants.DDM_ADD: if val in lst: feedback_fn("OS %s already in %s, ignoring", val, desc) else: lst.append(val) elif key == constants.DDM_REMOVE: if val in lst: lst.remove(val) else: feedback_fn("OS %s not found in %s, ignoring", val, desc) else: raise errors.ProgrammerError("Invalid modification '%s'" % key)
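Note: these two rows switch from logging-style lazy arguments to eager %-interpolation, presumably because feedback_fn is a plain one-argument callable rather than a logging function. With a stand-in callback:

def feedback_fn(msg):  # stand-in: the real callback takes one message
  print msg

val, desc = "debootstrap", "allowed OS list"
feedback_fn("OS %s already in %s, ignoring" % (val, desc))  # ok
# feedback_fn("OS %s already in %s, ignoring", val, desc)
# -> TypeError: feedback_fn() takes exactly 1 argument (3 given)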
def ExpandNames(self):
def CheckArguments(self):
def ExpandNames(self): if not isinstance(self.op.instances, list): raise errors.OpPrereqError("Invalid argument type 'instances'", errors.ECODE_INVAL)
def ExpandNames(self):
def CheckArguments(self):
def ExpandNames(self): if self.op.names: raise errors.OpPrereqError("Selective OS query not supported", errors.ECODE_INVAL)
def ExpandNames(self):
def CheckArguments(self):
def ExpandNames(self): _CheckOutputFields(static=self._FIELDS_STATIC, dynamic=self._FIELDS_DYNAMIC, selected=self.op.output_fields)
def ExpandNames(self): self.needed_locks = {}
def CheckArguments(self):
def ExpandNames(self): self.needed_locks = {}
if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT, constants.INSTANCE_REBOOT_HARD, constants.INSTANCE_REBOOT_FULL]: raise errors.ParameterError("reboot type not in [%s, %s, %s]" % (constants.INSTANCE_REBOOT_SOFT, constants.INSTANCE_REBOOT_HARD, constants.INSTANCE_REBOOT_FULL))
def ExpandNames(self): if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT, constants.INSTANCE_REBOOT_HARD, constants.INSTANCE_REBOOT_FULL]: raise errors.ParameterError("reboot type not in [%s, %s, %s]" % (constants.INSTANCE_REBOOT_SOFT, constants.INSTANCE_REBOOT_HARD, constants.INSTANCE_REBOOT_FULL)) self._ExpandAndLockInstance()
if not isinstance(self.op.instances, list): raise errors.OpPrereqError("Invalid argument type 'instances'", errors.ECODE_INVAL)
def ExpandNames(self): self.needed_locks = {} self.share_locks = dict.fromkeys(locking.LEVELS, 1)
for status, result, name in self.jobs:
for _, status, result, name in self.jobs:
def WaitOrShow(self, wait): """Wait for job results or only print the job IDs.
def _GetUpdatedParams(old_params, update_dict):
def _GetUpdatedParams(old_params, update_dict, use_default=True, use_none=False):
def _GetUpdatedParams(old_params, update_dict): """Return the new version of a parameter dictionary. @type old_params: dict @param old_params: old parameters @type update_dict: dict @param update_dict: dict containing new parameter values, or constants.VALUE_DEFAULT to reset the parameter to its default value @rtype: dict @return: the new parameter dictionary """ params_copy = copy.deepcopy(old_params) for key, val in update_dict.iteritems(): if val == constants.VALUE_DEFAULT: try: del params_copy[key] except KeyError: pass else: params_copy[key] = val return params_copy
if val == constants.VALUE_DEFAULT:
if ((use_default and val == constants.VALUE_DEFAULT) or (use_none and val is None)):
def _GetUpdatedParams(old_params, update_dict): """Return the new version of a parameter dictionary. @type old_params: dict @param old_params: old parameters @type update_dict: dict @param update_dict: dict containing new parameter values, or constants.VALUE_DEFAULT to reset the parameter to its default value @rtype: dict @return: the new parameter dictionary """ params_copy = copy.deepcopy(old_params) for key, val in update_dict.iteritems(): if val == constants.VALUE_DEFAULT: try: del params_copy[key] except KeyError: pass else: params_copy[key] = val return params_copy
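Note: assuming the new flags mean what their names suggest, VALUE_DEFAULT drops a key when use_default is set and None drops it when use_none is set. A runnable reconstruction with a worked example (VALUE_DEFAULT here stands in for constants.VALUE_DEFAULT):

import copy

VALUE_DEFAULT = "default"  # stand-in for constants.VALUE_DEFAULT

def _GetUpdatedParams(old_params, update_dict, use_default=True,
                      use_none=False):
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == VALUE_DEFAULT) or
        (use_none and val is None)):
      params_copy.pop(key, None)  # reset the parameter to its default
    else:
      params_copy[key] = val
  return params_copy

old = {"memory": 512, "vcpus": 2}
assert _GetUpdatedParams(old, {"memory": VALUE_DEFAULT}) == {"vcpus": 2}
assert _GetUpdatedParams(old, {"vcpus": None}, use_none=True) == {"memory": 512}
assert _GetUpdatedParams(old, {"vcpus": None}) == {"memory": 512, "vcpus": None}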
self._temporary_ids.DropECReservations(ec_id) self._temporary_macs.DropECReservations(ec_id) self._temporary_secrets.DropECReservations(ec_id)
for rm in self._all_rms: rm.DropECReservations(ec_id)
def DropECReservations(self, ec_id): """Drop per-execution-context reservations
request_version)
data_version)
def POST(self): """Create an instance.
self._next = max(self._limit, self._next * self._factor)
self._next = min(self._limit, self._next * self._factor)
def __call__(self): """Returns current delay and calculates the next one.
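Note: the max/min swap above is the whole fix: with max() the delay jumps straight to the limit and then grows past it, while min() caps it. A minimal backoff sketch under assumed start/factor/limit attributes:

class _Delay(object):
  """Exponential backoff sketch: returns the current delay, then grows it."""
  def __init__(self, start, factor, limit):
    self._next = start
    self._factor = factor
    self._limit = limit

  def __call__(self):
    current = self._next
    # min() caps the delay at the limit; with max() it would jump to the
    # limit at once and keep growing without bound afterwards
    self._next = min(self._limit, self._next * self._factor)
    return current

d = _Delay(1.0, 2, 5.0)
assert [d() for _ in range(4)] == [1.0, 2.0, 4.0, 5.0]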
not_marked = True for op in self.ops: if op.status in constants.OPS_FINALIZED: assert not_marked, "Finalized opcodes found after non-finalized ones" continue op.status = status op.result = result not_marked = False
try: not_marked = True for op in self.ops: if op.status in constants.OPS_FINALIZED: assert not_marked, "Finalized opcodes found after non-finalized ones" continue op.status = status op.result = result not_marked = False finally: self.queue.UpdateJobUnlocked(self)
def MarkUnfinishedOps(self, status, result): """Mark unfinished opcodes with a given status and result.
queue.CancelJobUnlocked(job)
job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED, "Job canceled by request")
def RunTask(self, job): # pylint: disable-msg=W0221 """Job executor.
try: job.MarkUnfinishedOps(constants.OP_STATUS_ERROR, "Unclean master daemon shutdown") finally: self.UpdateJobUnlocked(job)
job.MarkUnfinishedOps(constants.OP_STATUS_ERROR, "Unclean master daemon shutdown")
def __init__(self, context): """Constructor for JobQueue.
self.CancelJobUnlocked(job)
job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED, "Job canceled by request")
def CancelJob(self, job_id): """Cancels a job.
try: job.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None) finally: self.UpdateJobUnlocked(job)
job.MarkUnfinishedOps(constants.OP_STATUS_CANCELING, None)
def CancelJob(self, job_id): """Cancels a job.
@_RequireOpenQueue def CancelJobUnlocked(self, job): """Marks a job as canceled. """ try: job.MarkUnfinishedOps(constants.OP_STATUS_CANCELED, "Job canceled by request") finally: self.UpdateJobUnlocked(job)
def CancelJob(self, job_id): """Cancels a job.
"""
"""Runs all tests.
def runTests(self): """
logging.basicConfig(filename=os.devnull)
_SetupLogging("LOGTOSTDERR" in os.environ)
def runTests(self): """
valid = valid and osl and osl[0][1]
valid = bool(valid and osl and osl[0][1])
def Exec(self, feedback_fn): """Compute the list of OSes.
elif ask_key: options.extend([ "-oStrictHostKeyChecking=ask", ])
else: if ask_key: options.append("-oStrictHostKeyChecking=ask") elif strict_host_check: options.append("-oStrictHostKeyChecking=yes") else: options.append("-oStrictHostKeyChecking=no")
def _BuildSshOptions(self, batch, ask_key, use_cluster_key, strict_host_check): """Builds a list with needed SSH options.
self.dev_path = "/dev/%s/%s" % (self._vg_name, self._lv_name)
self._ValidateName(self._vg_name) self._ValidateName(self._lv_name) self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
def __init__(self, unique_id, children, size): """Attaches to a LV device.
self.dev_path = "/dev/%s/%s" % (self._vg_name, self._lv_name)
self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
def Rename(self, new_id): """Rename this logical volume.
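Note: the second row both switches to utils.PathJoin and validates the volume group and LV names first; presumably the point is that an unvalidated name could make /dev/<vg>/<lv> resolve somewhere unexpected. A rough sketch of what such a check must at minimum reject (the real _ValidateName is likely stricter, e.g. regex-based):

def _ValidateName(name):
  """Reject names that could escape the /dev/<vg>/<lv> namespace (sketch)."""
  if not name or name in (".", "..") or "/" in name:
    raise ValueError("Invalid LVM name %r" % name)

_ValidateName("xenvg")          # fine
try:
  _ValidateName("../../etc")    # would escape /dev
except ValueError:
  pass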
'-p',
def TestClusterBurnin(): """Burnin""" master = qa_config.GetMasterNode() options = qa_config.get('options', {}) disk_template = options.get('burnin-disk-template', 'drbd') parallel = options.get('burnin-in-parallel', False) check_inst = options.get('burnin-check-instances', False) do_rename = options.get('burnin-rename', '') # Get as many instances as we need instances = [] try: try: num = qa_config.get('options', {}).get('burnin-instances', 1) for _ in range(0, num): instances.append(qa_config.AcquireInstance()) except qa_error.OutOfInstancesError: print "Not enough instances, continuing anyway." if len(instances) < 1: raise qa_error.Error("Burnin needs at least one instance") script = qa_utils.UploadFile(master['primary'], '../tools/burnin') try: # Run burnin cmd = [script, '-p', '--os=%s' % qa_config.get('os'), '--disk-size=%s' % ",".join(qa_config.get('disk')), '--disk-growth=%s' % ",".join(qa_config.get('disk-growth')), '--disk-template=%s' % disk_template] if parallel: cmd.append('--parallel') if check_inst: cmd.append('--http-check') if do_rename: cmd.append('--rename=%s' % do_rename) cmd += [inst['name'] for inst in instances] AssertEqual(StartSSH(master['primary'], utils.ShellQuoteArgs(cmd)).wait(), 0) finally: cmd = ['rm', '-f', script] AssertEqual(StartSSH(master['primary'], utils.ShellQuoteArgs(cmd)).wait(), 0) finally: for inst in instances: qa_config.ReleaseInstance(inst)
nic_val = "nic,macaddr=%s,%s" % (nic.mac, nic_model)
nic_val = "nic,vlan=%s,macaddr=%s,%s" % (nic_seq, nic.mac, nic_model)
def _ExecuteKVMRuntime(self, instance, kvm_runtime, incoming=None): """Execute a KVM cmd, after completing it with some last minute data
kvm_cmd.extend(['-net', 'tap,script=%s' % script])
kvm_cmd.extend(['-net', 'tap,vlan=%s,script=%s' % (nic_seq, script)])
def _ExecuteKVMRuntime(self, instance, kvm_runtime, incoming=None): """Execute a KVM cmd, after completing it with some last minute data
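Note: both KVM rows add vlan=<sequence number> because, under QEMU's legacy -net syntax, the vlan number is what pairs each emulated NIC with its tap backend; without it, multiple NICs all land on the same emulated hub. A sketch with made-up MACs and a hypothetical ifup script path:

kvm_cmd = ["kvm"]
nics = [("aa:00:00:00:00:01", "model=virtio"),
        ("aa:00:00:00:00:02", "model=virtio")]
script = "/etc/ganeti/kvm-vif-bridge"  # hypothetical path
for nic_seq, (mac, nic_model) in enumerate(nics):
  kvm_cmd.extend(["-net", "nic,vlan=%s,macaddr=%s,%s" %
                  (nic_seq, mac, nic_model)])
  kvm_cmd.extend(["-net", "tap,vlan=%s,script=%s" % (nic_seq, script)])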
_CheckInstanceDown(self, instance, "cannot change disk template")
def CheckPrereq(self): """Check prerequisites.
("default_iallocator", None, ht.TMaybeString),
("default_iallocator", None, ht.TOr(ht.TString, ht.TNone)),
def Exec(self, feedback_fn): """Rename the cluster.
current_timeout = poll_timeout()
current_timeout = poll_timeout() * 1000
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout, _linger_timeout=constants.CHILD_LINGER_TIMEOUT): """Run a command and return its output. @type cmd: string or list @param cmd: Command to run @type env: dict @param env: The environment to use @type via_shell: bool @param via_shell: if we should run via the shell @type cwd: string @param cwd: the working directory for the program @type interactive: boolean @param interactive: Run command interactive (without piping) @type timeout: int @param timeout: Timeout after the programm gets terminated @rtype: tuple @return: (out, err, status) """ poller = select.poll() stderr = subprocess.PIPE stdout = subprocess.PIPE stdin = subprocess.PIPE if interactive: stderr = stdout = stdin = None child = subprocess.Popen(cmd, shell=via_shell, stderr=stderr, stdout=stdout, stdin=stdin, close_fds=True, env=env, cwd=cwd) out = StringIO() err = StringIO() linger_timeout = None if timeout is None: poll_timeout = None else: poll_timeout = RunningTimeout(timeout, True).Remaining msg_timeout = ("Command %s (%d) run into execution timeout, terminating" % (cmd, child.pid)) msg_linger = ("Command %s (%d) run into linger timeout, killing" % (cmd, child.pid)) timeout_action = _TIMEOUT_NONE if not interactive: child.stdin.close() poller.register(child.stdout, select.POLLIN) poller.register(child.stderr, select.POLLIN) fdmap = { child.stdout.fileno(): (out, child.stdout), child.stderr.fileno(): (err, child.stderr), } for fd in fdmap: SetNonblockFlag(fd, True) while fdmap: if poll_timeout: current_timeout = poll_timeout() if current_timeout < 0: if linger_timeout is None: logging.warning(msg_timeout) if child.poll() is None: timeout_action = _TIMEOUT_TERM IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM) linger_timeout = RunningTimeout(_linger_timeout, True).Remaining lt = linger_timeout() if lt < 0: break pt = max(0, lt) else: pt = current_timeout else: pt = None pollresult = RetryOnSignal(poller.poll, pt) for fd, event in pollresult: if event & select.POLLIN or event & select.POLLPRI: data = fdmap[fd][1].read() # no data from read signifies EOF (the same as POLLHUP) if not data: poller.unregister(fd) del fdmap[fd] continue fdmap[fd][0].write(data) if (event & select.POLLNVAL or event & select.POLLHUP or event & select.POLLERR): poller.unregister(fd) del fdmap[fd] if timeout is not None: assert callable(poll_timeout) # We have no I/O left but it might still run if child.poll() is None: _WaitForProcess(child, poll_timeout()) # Terminate if still alive after timeout if child.poll() is None: if linger_timeout is None: logging.warning(msg_timeout) timeout_action = _TIMEOUT_TERM IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM) lt = _linger_timeout else: lt = linger_timeout() _WaitForProcess(child, lt) # Okay, still alive after timeout and linger timeout? Kill it! if child.poll() is None: timeout_action = _TIMEOUT_KILL logging.warning(msg_linger) IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL) out = out.getvalue() err = err.getvalue() status = child.wait() return out, err, status, timeout_action
lt = linger_timeout()
lt = linger_timeout() * 1000
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout, _linger_timeout=constants.CHILD_LINGER_TIMEOUT): """Run a command and return its output. @type cmd: string or list @param cmd: Command to run @type env: dict @param env: The environment to use @type via_shell: bool @param via_shell: if we should run via the shell @type cwd: string @param cwd: the working directory for the program @type interactive: boolean @param interactive: Run command interactive (without piping) @type timeout: int @param timeout: Timeout after the programm gets terminated @rtype: tuple @return: (out, err, status) """ poller = select.poll() stderr = subprocess.PIPE stdout = subprocess.PIPE stdin = subprocess.PIPE if interactive: stderr = stdout = stdin = None child = subprocess.Popen(cmd, shell=via_shell, stderr=stderr, stdout=stdout, stdin=stdin, close_fds=True, env=env, cwd=cwd) out = StringIO() err = StringIO() linger_timeout = None if timeout is None: poll_timeout = None else: poll_timeout = RunningTimeout(timeout, True).Remaining msg_timeout = ("Command %s (%d) run into execution timeout, terminating" % (cmd, child.pid)) msg_linger = ("Command %s (%d) run into linger timeout, killing" % (cmd, child.pid)) timeout_action = _TIMEOUT_NONE if not interactive: child.stdin.close() poller.register(child.stdout, select.POLLIN) poller.register(child.stderr, select.POLLIN) fdmap = { child.stdout.fileno(): (out, child.stdout), child.stderr.fileno(): (err, child.stderr), } for fd in fdmap: SetNonblockFlag(fd, True) while fdmap: if poll_timeout: current_timeout = poll_timeout() if current_timeout < 0: if linger_timeout is None: logging.warning(msg_timeout) if child.poll() is None: timeout_action = _TIMEOUT_TERM IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM) linger_timeout = RunningTimeout(_linger_timeout, True).Remaining lt = linger_timeout() if lt < 0: break pt = max(0, lt) else: pt = current_timeout else: pt = None pollresult = RetryOnSignal(poller.poll, pt) for fd, event in pollresult: if event & select.POLLIN or event & select.POLLPRI: data = fdmap[fd][1].read() # no data from read signifies EOF (the same as POLLHUP) if not data: poller.unregister(fd) del fdmap[fd] continue fdmap[fd][0].write(data) if (event & select.POLLNVAL or event & select.POLLHUP or event & select.POLLERR): poller.unregister(fd) del fdmap[fd] if timeout is not None: assert callable(poll_timeout) # We have no I/O left but it might still run if child.poll() is None: _WaitForProcess(child, poll_timeout()) # Terminate if still alive after timeout if child.poll() is None: if linger_timeout is None: logging.warning(msg_timeout) timeout_action = _TIMEOUT_TERM IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM) lt = _linger_timeout else: lt = linger_timeout() _WaitForProcess(child, lt) # Okay, still alive after timeout and linger timeout? Kill it! if child.poll() is None: timeout_action = _TIMEOUT_KILL logging.warning(msg_linger) IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL) out = out.getvalue() err = err.getvalue() status = child.wait() return out, err, status, timeout_action
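Note: both "* 1000" rows exist for the same reason: select.poll().poll() takes its timeout in milliseconds, while the RunningTimeout helper used above evidently yields seconds. Quick demonstration:

import select
import time

poller = select.poll()     # nothing registered, so poll() just waits
start = time.time()
poller.poll(1.5 * 1000)    # timeout is in *milliseconds*
assert 1.4 < time.time() - start < 2.0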
if result.failed and force:
if result.failed:
def CleanupInstance(self, instance_name): """Cleanup after a stopped instance
if os.path.exists(i):
if not os.path.exists(i):
def StartImportExportDaemon(mode, key_name, ca, host, port, instance, ieio, ieioargs): """Starts an import or export daemon. @param mode: Import/output mode @type key_name: string @param key_name: RSA key name (None to use cluster certificate) @type ca: string: @param ca: Remote CA in PEM format (None to use cluster certificate) @type host: string @param host: Remote host for export (None for import) @type port: int @param port: Remote port for export (None for import) @type instance: L{objects.Instance} @param instance: Instance object @param ieio: Input/output type @param ieioargs: Input/output arguments """ if mode == constants.IEM_IMPORT: prefix = "import" if not (host is None and port is None): _Fail("Can not specify host or port on import") elif mode == constants.IEM_EXPORT: prefix = "export" if host is None or port is None: _Fail("Host and port must be specified for an export") else: _Fail("Invalid mode %r", mode) if (key_name is None) ^ (ca is None): _Fail("Cluster certificate can only be used for both key and CA") (cmd_env, cmd_prefix, cmd_suffix) = \ _GetImportExportIoCommand(instance, mode, ieio, ieioargs) if key_name is None: # Use server.pem key_path = constants.NODED_CERT_FILE cert_path = constants.NODED_CERT_FILE assert ca is None else: (_, key_path, cert_path) = _GetX509Filenames(constants.CRYPTO_KEYS_DIR, key_name) assert ca is not None for i in [key_path, cert_path]: if os.path.exists(i): _Fail("File '%s' does not exist" % i) status_dir = _CreateImportExportStatusDir(prefix) try: status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE) pid_file = utils.PathJoin(status_dir, _IES_PID_FILE) ca_file = utils.PathJoin(status_dir, _IES_CA_FILE) if ca is None: # Use server.pem ca = utils.ReadFile(constants.NODED_CERT_FILE) utils.WriteFile(ca_file, data=ca, mode=0400) cmd = [ constants.IMPORT_EXPORT_DAEMON, status_file, mode, "--key=%s" % key_path, "--cert=%s" % cert_path, "--ca=%s" % ca_file, ] if host: cmd.append("--host=%s" % host) if port: cmd.append("--port=%s" % port) if cmd_prefix: cmd.append("--cmd-prefix=%s" % cmd_prefix) if cmd_suffix: cmd.append("--cmd-suffix=%s" % cmd_suffix) logfile = _InstanceLogName(prefix, instance.os, instance.name) # TODO: Once _InstanceLogName uses tempfile.mkstemp, StartDaemon has # support for receiving a file descriptor for output utils.StartDaemon(cmd, env=cmd_env, pidfile=pid_file, output=logfile) # The import/export name is simply the status directory name return os.path.basename(status_dir) except Exception: shutil.rmtree(status_dir, ignore_errors=True) raise
self.opnode_name = _ExpandNodeName(self.cfg, self.op.node_name)
self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
def CheckArguments(self): self.opnode_name = _ExpandNodeName(self.cfg, self.op.node_name)
def SubmitOpCode(op, cl=None, feedback_fn=None):
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None):
def SubmitOpCode(op, cl=None, feedback_fn=None): """Legacy function to submit an opcode. This is just a simple wrapper over the construction of the processor instance. It should be extended to better handle feedback and interaction functions. """ if cl is None: cl = GetClient() job_id = SendJob([op], cl) op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn) return op_results[0]
It will also add the dry-run parameter from the options passed, if true. """ if opts and opts.dry_run: op.dry_run = opts.dry_run
It will also process the opcodes if we're sending the via SendJob (otherwise SubmitOpCode does it). """
def SubmitOrSend(op, opts, cl=None, feedback_fn=None): """Wrapper around SubmitOpCode or SendJob. This function will decide, based on the 'opts' parameter, whether to submit and wait for the result of the opcode (and return it), or whether to just send the job and print its identifier. It is used in order to simplify the implementation of the '--submit' option. It will also add the dry-run parameter from the options passed, if true. """ if opts and opts.dry_run: op.dry_run = opts.dry_run if opts and opts.submit_only: job_id = SendJob([op], cl=cl) raise JobSubmittedException(job_id) else: return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn)
job_id = SendJob([op], cl=cl)
job = [op] SetGenericOpcodeOpts(job, opts) job_id = SendJob(job, cl=cl)
def SubmitOrSend(op, opts, cl=None, feedback_fn=None): """Wrapper around SubmitOpCode or SendJob. This function will decide, based on the 'opts' parameter, whether to submit and wait for the result of the opcode (and return it), or whether to just send the job and print its identifier. It is used in order to simplify the implementation of the '--submit' option. It will also add the dry-run parameter from the options passed, if true. """ if opts and opts.dry_run: op.dry_run = opts.dry_run if opts and opts.submit_only: job_id = SendJob([op], cl=cl) raise JobSubmittedException(job_id) else: return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn)
return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn)
return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts) def SetGenericOpcodeOpts(opcode_list, options): """Processor for generic options. This function updates the given opcodes based on generic command line options (like debug, dry-run, etc.). @param opcode_list: list of opcodes @param options: command line options or None @return: None (in-place modification) """ if not options: return for op in opcode_list: op.dry_run = options.dry_run op.debug_level = options.debug
def SubmitOrSend(op, opts, cl=None, feedback_fn=None): """Wrapper around SubmitOpCode or SendJob. This function will decide, based on the 'opts' parameter, whether to submit and wait for the result of the opcode (and return it), or whether to just send the job and print its identifier. It is used in order to simplify the implementation of the '--submit' option. It will also add the dry-run parameter from the options passed, if true. """ if opts and opts.dry_run: op.dry_run = opts.dry_run if opts and opts.submit_only: job_id = SendJob([op], cl=cl) raise JobSubmittedException(job_id) else: return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn)
continue disks = node_disks[nname] msg = nres.fail_msg _ErrorIf(msg, self.ENODERPC, nname, "while getting disk information: %s", nres.fail_msg) if msg:
def _CollectDiskInfo(self, nodelist, node_image, instanceinfo): """Gets per-disk status information for all instances.
data = len(disks) * [None]
data = len(disks) * [(False, "node offline")]
def _CollectDiskInfo(self, nodelist, node_image, instanceinfo): """Gets per-disk status information for all instances.
data = nres.payload
msg = nres.fail_msg _ErrorIf(msg, self.ENODERPC, nname, "while getting disk information: %s", msg) if msg: data = len(disks) * [(False, msg)] else: data = [] for idx, i in enumerate(nres.payload): if isinstance(i, (tuple, list)) and len(i) == 2: data.append(i) else: logging.warning("Invalid result from node %s, entry %d: %s", nname, idx, i) data.append((False, "Invalid result from the remote node"))
def _CollectDiskInfo(self, nodelist, node_image, instanceinfo): """Gets per-disk status information for all instances.
len(nnames) <= len(instanceinfo[inst].all_nodes)
len(nnames) <= len(instanceinfo[inst].all_nodes) and compat.all(isinstance(s, (tuple, list)) and len(s) == 2 for s in statuses)
def _CollectDiskInfo(self, nodelist, node_image, instanceinfo): """Gets per-disk status information for all instances.
"""Formats the fingerprint of L{paramiko.PKey.get_fingerprint()}
"""Format paramiko PKey fingerprint.
def FormatParamikoFingerprint(fingerprint): """Formats the fingerprint of L{paramiko.PKey.get_fingerprint()} @type fingerprint: str @param fingerprint: PKey fingerprint @return The string hex representation of the fingerprint """ assert len(fingerprint) % 2 == 0 return ":".join(re.findall(r"..", fingerprint.lower()))
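Note: for reference, the function's behaviour on a sample hex digest (illustrative input, not a real key):

import re

def FormatParamikoFingerprint(fingerprint):
  assert len(fingerprint) % 2 == 0
  return ":".join(re.findall(r"..", fingerprint.lower()))

assert FormatParamikoFingerprint("7A58AF1CBF3A91DB") == "7a:58:af:1c:bf:3a:91:db"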