rem
stringlengths 0
322k
| add
stringlengths 0
2.05M
| context
stringlengths 8
228k
|
---|---|---|
rth = None def __init__(self, domid):
|
def __init__(self, vif):
|
def commit(self): msg = os.read(self.msgfd.fileno(), 4) if msg != 'done': print 'Unknown message: %s' % msg
|
if not self.rth: self.rth = netlink.rtnl() self.devname = self._startimq(domid) dev = self.rth.getlink(self.devname) if not dev: raise BufferedNICException('could not find device %s' % self.devname) self.dev = dev['index'] self.handle = qdisc.TC_H_ROOT self.q = qdisc.QueueQdisc()
|
self.vif = vif self.pool = Netbufpool(selectnetbuf()) self.rth = getrth() self.setup()
|
def __init__(self, domid): self.installed = False
|
self._setup()
|
self.install()
|
def postsuspend(self): if not self.installed: self._setup()
|
req = qdisc.changerequest(self.dev, self.handle, self.q)
|
req = qdisc.changerequest(self.bufdevno, self.handle, self.q)
|
def _sendqmsg(self, action): self.q.action = action req = qdisc.changerequest(self.dev, self.handle, self.q) self.rth.talk(req.pack())
|
def _setup(self): q = self.rth.getqdisc(self.dev)
|
return True def setup(self): """install Remus queue on VIF outbound traffic""" self.bufdev = self.pool.get() devname = self.bufdev.devname bufdev = self.rth.getlink(devname) if not bufdev: raise BufferedNICException('could not find device %s' % devname) self.bufdev.install(self.vif) self.bufdevno = bufdev['index'] self.handle = qdisc.TC_H_ROOT self.q = qdisc.QueueQdisc() if not util.modprobe('sch_queue'): raise BufferedNICException('could not load sch_queue module') def install(self): devname = self.bufdev.devname q = self.rth.getqdisc(self.bufdevno)
|
def _sendqmsg(self, action): self.q.action = action req = qdisc.changerequest(self.dev, self.handle, self.q) self.rth.talk(req.pack())
|
'discipline on %s' % self.devname) print 'installing buffer on %s' % self.devname req = qdisc.addrequest(self.dev, self.handle, self.q)
|
'discipline on %s' % devname) print ('installing buffer on %s... ' % devname), req = qdisc.addrequest(self.bufdevno, self.handle, self.q)
|
def _setup(self): q = self.rth.getqdisc(self.dev) if q: if q['kind'] == 'queue': self.installed = True return if q['kind'] != 'pfifo_fast': raise BufferedNICException('there is already a queueing ' 'discipline on %s' % self.devname)
|
req = qdisc.delrequest(self.dev, self.handle)
|
req = qdisc.delrequest(self.bufdevno, self.handle)
|
def uninstall(self): if self.installed: req = qdisc.delrequest(self.dev, self.handle) self.rth.talk(req.pack()) self.installed = False
|
def _startimq(self, domid): imqebt = '/usr/lib/xen/bin/imqebt' imqdev = 'imq0' vid = 'vif%d.0' % domid for mod in ['sch_queue', 'imq', 'ebt_imq']: util.runcmd(['modprobe', mod]) util.runcmd("ip link set %s up" % (imqdev)) util.runcmd("%s -F FORWARD" % (imqebt)) util.runcmd("%s -A FORWARD -i %s -j imq --todev %s" % (imqebt, vid, imqdev)) return imqdev
|
self.bufdev.uninstall() self.pool.put(self.bufdev)
|
def _startimq(self, domid): # stopgap hack to set up IMQ for an interface. Wrong in many ways. imqebt = '/usr/lib/xen/bin/imqebt' imqdev = 'imq0' vid = 'vif%d.0' % domid for mod in ['sch_queue', 'imq', 'ebt_imq']: util.runcmd(['modprobe', mod]) util.runcmd("ip link set %s up" % (imqdev)) util.runcmd("%s -F FORWARD" % (imqebt)) util.runcmd("%s -A FORWARD -i %s -j imq --todev %s" % (imqebt, vid, imqdev))
|
return 'VM %d (%s), MACs: [%s], disks: [%s]' % \ (self.domid, self.name, self.epoch, ', '.join(self.macs),
|
return 'VM %d (%s), vifs: [%s], disks: [%s]' % \ (self.domid, self.name, ', '.join([str(v) for v in self.vifs]),
|
def __str__(self): return 'VM %d (%s), MACs: [%s], disks: [%s]' % \ (self.domid, self.name, self.epoch, ', '.join(self.macs), ', '.join([str(d) for d in self.disks]))
|
(pci_list, cfg_list) = save_pci_conf_space([self.name]) pos = self.find_cap_offset(PCI_CAP_ID_AF) self.pci_conf_write8(pos + PCI_AF_CTL, PCI_AF_CTL_FLR) time.sleep(0.100) restore_pci_conf_space((pci_list, cfg_list))
|
af_pos = self.find_cap_offset(PCI_CAP_ID_AF) self.do_AF_FLR(af_pos)
|
def do_FLR(self, is_hvm, strict_check): """ Perform FLR (Functional Level Reset) for the device. """ if self.dev_type == DEV_TYPE_PCIe_ENDPOINT: # If PCIe device supports FLR, we use it. if self.pcie_flr: (pci_list, cfg_list) = save_pci_conf_space([self.name]) pos = self.find_cap_offset(PCI_CAP_ID_EXP) self.pci_conf_write32(pos + PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_FLR) # We must sleep at least 100ms for the completion of FLR time.sleep(0.100) restore_pci_conf_space((pci_list, cfg_list)) else: if self.bus == 0: self.do_FLR_for_integrated_device() else: funcs = self.find_all_the_multi_functions()
|
self.do_FLR_for_integrated_device()
|
if self.slot == 0x02 and self.func == 0x0: vendor_id = self.pci_conf_read16(PCI_VENDOR_ID) if vendor_id != VENDOR_INTEL: return class_id = self.pci_conf_read16(PCI_CLASS_DEVICE) if class_id != PCI_CLASS_ID_VGA: return device_id = self.pci_conf_read16(PCI_DEVICE_ID) if device_id == PCI_DEVICE_ID_IGFX_GM45: self.do_FLR_for_GM45_iGFX() elif device_id == PCI_DEVICE_ID_IGFX_EAGLELAKE or \ device_id == PCI_DEVICE_ID_IGFX_Q45 or \ device_id == PCI_DEVICE_ID_IGFX_G45 or \ device_id == PCI_DEVICE_ID_IGFX_G41: self.do_FLR_for_intel_4Series_iGFX() else: log.debug("Unknown iGFX device_id:%x", device_id) else: self.do_FLR_for_integrated_device()
|
def do_FLR(self, is_hvm, strict_check): """ Perform FLR (Functional Level Reset) for the device. """ if self.dev_type == DEV_TYPE_PCIe_ENDPOINT: # If PCIe device supports FLR, we use it. if self.pcie_flr: (pci_list, cfg_list) = save_pci_conf_space([self.name]) pos = self.find_cap_offset(PCI_CAP_ID_EXP) self.pci_conf_write32(pos + PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_FLR) # We must sleep at least 100ms for the completion of FLR time.sleep(0.100) restore_pci_conf_space((pci_list, cfg_list)) else: if self.bus == 0: self.do_FLR_for_integrated_device() else: funcs = self.find_all_the_multi_functions()
|
t = xstransact("%s/device" % self.vmpath)
|
def _releaseDevices(self, suspend = False): """Release all domain's devices. Nothrow guarantee.""" t = xstransact("%s/device" % self.vmpath) if self.image: try: for dev in t.list('tap'): log.debug("Early removing %s", dev); self.getDeviceController('tap').destroyDevice(dev, True) time.sleep(0.1) log.debug("Destroying device model") self.image.destroyDeviceModel() except Exception, e: log.exception("Device model destroy failed %s" % str(e)) else: log.debug("No device model")
|
|
for dev in t.list('tap'): log.debug("Early removing %s", dev); self.getDeviceController('tap').destroyDevice(dev, True) time.sleep(0.1)
|
def _releaseDevices(self, suspend = False): """Release all domain's devices. Nothrow guarantee.""" t = xstransact("%s/device" % self.vmpath) if self.image: try: for dev in t.list('tap'): log.debug("Early removing %s", dev); self.getDeviceController('tap').destroyDevice(dev, True) time.sleep(0.1) log.debug("Destroying device model") self.image.destroyDeviceModel() except Exception, e: log.exception("Device model destroy failed %s" % str(e)) else: log.debug("No device model")
|
|
if devclass is 'tap': continue
|
def _releaseDevices(self, suspend = False): """Release all domain's devices. Nothrow guarantee.""" t = xstransact("%s/device" % self.vmpath) if self.image: try: for dev in t.list('tap'): log.debug("Early removing %s", dev); self.getDeviceController('tap').destroyDevice(dev, True) time.sleep(0.1) log.debug("Destroying device model") self.image.destroyDeviceModel() except Exception, e: log.exception("Device model destroy failed %s" % str(e)) else: log.debug("No device model")
|
|
return XEN_API_ON_CRASH_BEHAVIOUR_LEGACY[v] \ if v in XEN_API_ON_CRASH_BEHAVIOUR else v
|
if v in XEN_API_ON_CRASH_BEHAVIOUR: return XEN_API_ON_CRASH_BEHAVIOUR_LEGACY[v] else: return v
|
def convert_on_crash(v): v = str(v) return XEN_API_ON_CRASH_BEHAVIOUR_LEGACY[v] \ if v in XEN_API_ON_CRASH_BEHAVIOUR else v
|
'name': str,
|
def scrub_password(data): if type(data) == dict or type(data) == XendConfig: scrubbed = {} for key in data.keys(): if key == "vncpasswd": scrubbed[key] = "XXXXXXXX" else: scrubbed[key] = scrub_password(data[key]) return scrubbed elif type(data) == list: if len(data) == 2 and type(data[0]) == str and data[0] == 'vncpasswd': return ['vncpasswd', 'XXXXXXXX'] else: scrubbed = [] for entry in data: scrubbed.append(scrub_password(entry)) return scrubbed elif type(data) == tuple: scrubbed = [] for entry in data: scrubbed.append(scrub_password(entry)) return tuple(scrubbed) elif type(data) == str: return re.sub(r'\(vncpasswd\s+[^\)]+\)','(vncpasswd XXXXXX)', data) else: return data
|
|
'name',
|
def scrub_password(data): if type(data) == dict or type(data) == XendConfig: scrubbed = {} for key in data.keys(): if key == "vncpasswd": scrubbed[key] = "XXXXXXXX" else: scrubbed[key] = scrub_password(data[key]) return scrubbed elif type(data) == list: if len(data) == 2 and type(data[0]) == str and data[0] == 'vncpasswd': return ['vncpasswd', 'XXXXXXXX'] else: scrubbed = [] for entry in data: scrubbed.append(scrub_password(entry)) return scrubbed elif type(data) == tuple: scrubbed = [] for entry in data: scrubbed.append(scrub_password(entry)) return tuple(scrubbed) elif type(data) == str: return re.sub(r'\(vncpasswd\s+[^\)]+\)','(vncpasswd XXXXXX)', data) else: return data
|
|
self.rta_len = align(self.fmtlen + len(self.body))
|
self.rta_len = self.fmtlen + align(len(self.body), 2)
|
def pack(self): self.rta_len = align(self.fmtlen + len(self.body)) s = struct.pack(self.fmt, self.rta_len, self.rta_type) + self.body pad = self.rta_len - len(s) if pad: s += '\0' * pad return s
|
self.body = msg[align(self.fmtlen):self.rta_len]
|
self.body = msg[self.fmtlen:self.rta_len]
|
def unpack(self, msg): args = struct.unpack(self.fmt, msg[:self.fmtlen]) self.rta_len, self.rta_type = args
|
t = xstransact("%s/device" % self.vmpath)
|
def _releaseDevices(self, suspend = False): """Release all domain's devices. Nothrow guarantee.""" if self.image: try: log.debug("Destroying device model") self.image.destroyDeviceModel() except Exception, e: log.exception("Device model destroy failed %s" % str(e)) else: log.debug("No device model")
|
|
s = scsiinfo.split()
|
s = scsiinfo.replace(']', '] ').split()
|
def _vscsi_get_scsidevices_by_lsscsi(option = ""): """ get all scsi devices information by lsscsi """ devices = [] for scsiinfo in os.popen('{ lsscsi -g %s; } 2>/dev/null' % option).readlines(): s = scsiinfo.split() hctl = s[0][1:-1] try: devname = s[-2].split('/dev/')[1] except IndexError: devname = None try: sg = s[-1].split('/dev/')[1] scsi_id = _vscsi_get_scsiid(sg) except IndexError: sg = None scsi_id = None devices.append([hctl, devname, sg, scsi_id]) return devices
|
dtd = xmldtd.load_dtd(self.dtd) parser = xmlproc.XMLProcessor() parser.set_application(xmlval.ValidatingApp(dtd, parser)) parser.dtd = dtd parser.ent = dtd parser.parse_resource(file)
|
try: dtd = etree.DTD(open(self.dtd, 'r')) except IOError: warnings.warn('DTD file %s not found.' % (self.dtd), UserWarning) return tree = etree.parse(file) root = tree.getroot() if not dtd.validate(root): self.handle_dtd_errors(dtd)
|
def check_dtd(self, file): """ Check file against DTD. Use this if possible as it gives nice error messages """ dtd = xmldtd.load_dtd(self.dtd) parser = xmlproc.XMLProcessor() parser.set_application(xmlval.ValidatingApp(dtd, parser)) parser.dtd = dtd parser.ent = dtd parser.parse_resource(file)
|
dtd = xmldtd.load_dtd(self.dtd) app = xmlval.ValidatingApp(dtd, self) app.set_locator(self) self.dom2sax(dom, app) def report_error(self, number, args=None): self.errors = xmlproc.errors.english
|
def check_dom_against_dtd(self, dom): """ Check DOM again DTD. Doesn't give as nice error messages. (no location info) """ dtd = xmldtd.load_dtd(self.dtd) app = xmlval.ValidatingApp(dtd, self) app.set_locator(self) self.dom2sax(dom, app)
|
|
msg = self.errors[number] if args != None: msg = msg % args except KeyError: msg = self.errors[4002] % number print msg
|
dtd = etree.DTD(open(self.dtd, 'r')) except IOError: warnings.warn('DTD file %s not found.' % (self.dtd), UserWarning) return root = etree.XML(dom.toxml()) if not dtd.validate(root): self.handle_dtd_errors(dtd) def handle_dtd_errors(self, dtd): for err in dtd.error_log: err_str = 'ERROR: %s\n' % (str(err),) sys.stderr.write(err_str) sys.stderr.flush()
|
def report_error(self, number, args=None): self.errors = xmlproc.errors.english try: msg = self.errors[number] if args != None: msg = msg % args except KeyError: msg = self.errors[4002] % number # Unknown err msg :-) print msg sys.exit(-1)
|
def get_line(self): return -1 def get_column(self): return -1 def dom2sax(self, dom, app): """ Take a dom tree and tarverse it, issuing SAX calls to app. """ for child in dom.childNodes: if child.nodeType == child.TEXT_NODE: data = child.nodeValue app.handle_data(data, 0, len(data)) else: app.handle_start_tag( child.nodeName, self.attrs_to_dict(child.attributes)) self.dom2sax(child, app) app.handle_end_tag(child.nodeName) def attrs_to_dict(self, attrs): return dict(attrs.items())
|
def report_error(self, number, args=None): self.errors = xmlproc.errors.english try: msg = self.errors[number] if args != None: msg = msg % args except KeyError: msg = self.errors[4002] % number # Unknown err msg :-) print msg sys.exit(-1)
|
|
try: return (devname, os.stat(n).st_rdev) except Exception, ex: pass
|
def blkdev_name_to_number(name): """Take the given textual block-device name (e.g., '/dev/sda1', 'hda') and return the device number used by the OS. """ n = expand_dev_name(name) devname = 'virtual-device' devnum = None try: return (devname, os.stat(n).st_rdev) except Exception, ex: pass scsi_major = [ 8, 65, 66, 67, 68, 69, 70, 71, 128, 129, 130, 131, 132, 133, 134, 135 ] if re.match( '/dev/sd[a-z]([1-9]|1[0-5])?$', n): major = scsi_major[(ord(n[7:8]) - ord('a')) / 16] minor = ((ord(n[7:8]) - ord('a')) % 16) * 16 + int(n[8:] or 0) devnum = major * 256 + minor elif re.match( '/dev/sd[a-i][a-z]([1-9]|1[0-5])?$', n): major = scsi_major[((ord(n[7:8]) - ord('a') + 1) * 26 + (ord(n[8:9]) - ord('a'))) / 16 ] minor = (((ord(n[7:8]) - ord('a') + 1 ) * 26 + (ord(n[8:9]) - ord('a'))) % 16) * 16 + int(n[9:] or 0) devnum = major * 256 + minor elif re.match( '/dev/hd[a-t]([1-9]|[1-5][0-9]|6[0-3])?$', n): ide_majors = [ 3, 22, 33, 34, 56, 57, 88, 89, 90, 91 ] major = ide_majors[(ord(n[7:8]) - ord('a')) / 2] minor = ((ord(n[7:8]) - ord('a')) % 2) * 64 + int(n[8:] or 0) devnum = major * 256 + minor elif re.match( '/dev/xvd[a-p]([1-9]|1[0-5])?$', n): devnum = (202 << 8) + ((ord(n[8:9]) - ord('a')) << 4) + int(n[9:] or 0) elif re.match('/dev/xvd[q-z]([1-9]|1[0-5])?$', n): devname = 'virtual-device-ext' devnum = (1 << 28) + ((ord(n[8:9]) - ord('a')) << 8) + int(n[9:] or 0) elif re.match('/dev/xvd[a-i][a-z]([1-9]|1[0-5])?$', n): devname = 'virtual-device-ext' devnum = (1 << 28) + (((ord(n[8:9]) - ord('a') + 1) * 26 + (ord(n[9:10]) - ord('a'))) << 8) + int(n[10:] or 0) elif re.match( '^(0x)[0-9a-fA-F]+$', name ): devnum = string.atoi(name, 16) elif re.match('^[0-9]+$', name): devnum = string.atoi(name, 10) return (devname, devnum)
|
|
if count < 100: continue
|
if count > 100: break
|
def createDeviceModel(self, restore = False): if self.device_model is None: return if self.pid: return # Execute device model. #todo: Error handling args = self.getDeviceModelArgs(restore) env = dict(os.environ) if self.display: env['DISPLAY'] = self.display if self.xauthority: env['XAUTHORITY'] = self.xauthority unique_id = "%i-%i" % (self.vm.getDomid(), time.time()) sentinel_path = sentinel_path_prefix + unique_id sentinel_path_fifo = sentinel_path + '.fifo' os.mkfifo(sentinel_path_fifo, 0600) sentinel_write = file(sentinel_path_fifo, 'r+') self._openSentinel(sentinel_path_fifo) self.vm.storeDom("image/device-model-fifo", sentinel_path_fifo) xstransact.Mkdir("/local/domain/0/device-model/%i" % self.vm.getDomid()) xstransact.SetPermissions("/local/domain/0/device-model/%i" % self.vm.getDomid(), { 'dom': self.vm.getDomid(), 'read': True, 'write': True }) log.info("spawning device models: %s %s", self.device_model, args) # keep track of pid and spawned options to kill it later
|
nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if len(info['node_to_cpu'][i]) == 0 or i not in node_list:
|
if len(info['node_to_cpu'][i]) == 0:
|
def find_relaxed_node(node_list): import sys nr_nodes = info['nr_nodes'] if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if len(info['node_to_cpu'][i]) == 0 or i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
cores_per_node = info['nr_cpus'] / info['nr_nodes'] nodes_required = (self.info['VCPUs_max'] + cores_per_node - 1) / cores_per_node if nodes_required > 1: log.debug("allocating %d NUMA nodes", nodes_required) best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['nr_nodes']))) for i in best_nodes[:nodes_required - 1]: cpumask = cpumask + info['node_to_cpu'][i]
|
best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['nr_nodes']))) for node_idx in best_nodes: if len(cpumask) >= self.info['VCPUs_max']: break cpumask = cpumask + info['node_to_cpu'][node_idx] log.debug("allocating additional NUMA node %d", node_idx)
|
def find_relaxed_node(node_list): import sys nr_nodes = info['nr_nodes'] if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if len(info['node_to_cpu'][i]) == 0 or i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
dev_type = sxp.name(dev_config)
|
def device_create(self, dev_config): """Create a new device.
|
|
if os.path.dirname(file) == "":
|
if file and os.path.dirname(file) == "":
|
def get_xend_tcp_xmlrpc_server_ssl_key_file(self): name = 'xend-tcp-xmlrpc-server-ssl-key-file' file = self.get_config_string(name) if os.path.dirname(file) == "": file = auxbin.xen_configdir() + '/' + file; if not os.path.exists(file): raise XendError("invalid xend config %s: directory '%s' does not exist" % (name, file)) return file
|
if not os.path.exists(file):
|
if file and not os.path.exists(file):
|
def get_xend_tcp_xmlrpc_server_ssl_key_file(self): name = 'xend-tcp-xmlrpc-server-ssl-key-file' file = self.get_config_string(name) if os.path.dirname(file) == "": file = auxbin.xen_configdir() + '/' + file; if not os.path.exists(file): raise XendError("invalid xend config %s: directory '%s' does not exist" % (name, file)) return file
|
if os.path.dirname(file) == "":
|
if file and os.path.dirname(file) == "":
|
def get_xend_tcp_xmlrpc_server_ssl_cert_file(self): name = 'xend-tcp-xmlrpc-server-ssl-cert-file' file = self.get_config_string(name) if os.path.dirname(file) == "": file = auxbin.xen_configdir() + '/' + file; if not os.path.exists(file): raise XendError("invalid xend config %s: directory '%s' does not exist" % (name, file)) return file
|
if not os.path.exists(file):
|
if file and not os.path.exists(file):
|
def get_xend_tcp_xmlrpc_server_ssl_cert_file(self): name = 'xend-tcp-xmlrpc-server-ssl-cert-file' file = self.get_config_string(name) if os.path.dirname(file) == "": file = auxbin.xen_configdir() + '/' + file; if not os.path.exists(file): raise XendError("invalid xend config %s: directory '%s' does not exist" % (name, file)) return file
|
vif_properties = \ map(server.xenapi.VIF.get_runtime_properties, vif_refs)
|
vif_properties = [] for vif_ref in vif_refs: vif_property = server.xenapi.VIF.get_runtime_properties(vif_ref) vif_property['mac'] = server.xenapi.VIF.get_MAC(vif_ref) vif_properties.append(vif_property)
|
def xm_network_list(args): (use_long, params) = arg_check_for_resource_list(args, "network-list") dom = params[0] if serverType == SERVER_XEN_API: vif_refs = server.xenapi.VM.get_VIFs(get_single_vm(dom)) vif_properties = \ map(server.xenapi.VIF.get_runtime_properties, vif_refs) devs = map(lambda (handle, properties): [handle, map2sxp(properties)], zip(range(len(vif_properties)), vif_properties)) else: devs = server.xend.domain.getDeviceSxprs(dom, 'vif') if use_long: map(PrettyPrint.prettyprint, devs) else: hdr = 0 for x in devs: if hdr == 0: print 'Idx BE MAC Addr. handle state evt-ch tx-/rx-ring-ref BE-path' hdr = 1 ni = parse_dev_info(x[1]) ni['idx'] = int(x[0]) print ("%(idx)-3d " "%(backend-id)-3d" "%(mac)-17s " "%(handle)-3d " "%(state)-3d " "%(event-ch)-3d " "%(tx-ring-ref)-5d/%(rx-ring-ref)-5d " "%(be-path)-30s " % ni)
|
balloon.free(memory + shadow, dominfo)
|
vtd_mem = 0 info = xc.physinfo() if 'hvm_directio' in info['virt_caps']: vtd_mem = 4 * (dominfo.info['memory_static_max'] / 1024 / 1024) vtd_mem = ((vtd_mem + 1023) / 1024) * 1024 balloon.free(memory + shadow + vtd_mem, dominfo)
|
def restore(xd, fd, dominfo = None, paused = False, relocating = False): try: if not os.path.isdir("/var/lib/xen"): os.makedirs("/var/lib/xen") except Exception, exn: log.exception("Can't create directory '/var/lib/xen'") raise XendError("Can't create directory '/var/lib/xen'") signature = read_exact(fd, len(SIGNATURE), "not a valid guest state file: signature read") if signature != SIGNATURE: raise XendError("not a valid guest state file: found '%s'" % signature) l = read_exact(fd, sizeof_int, "not a valid guest state file: config size read") vmconfig_size = unpack("!i", l)[0] vmconfig_buf = read_exact(fd, vmconfig_size, "not a valid guest state file: config read") p = sxp.Parser() p.input(vmconfig_buf) if not p.ready: raise XendError("not a valid guest state file: config parse") vmconfig = p.get_val() if not relocating: domconfig = XendConfig(sxp_obj = vmconfig) othervm = xd.domain_lookup_nr(domconfig["name_label"]) if othervm is None or othervm.domid is None: othervm = xd.domain_lookup_nr(domconfig["uuid"]) if othervm is not None and othervm.domid is not None: raise VmError("Domain '%s' already exists with ID '%d'" % (domconfig["name_label"], othervm.domid)) if dominfo: dominfo.resume() else: dominfo = xd.restore_(vmconfig) image_cfg = dominfo.info.get('image', {}) is_hvm = dominfo.info.is_hvm() if is_hvm: nomigrate = dominfo.info['platform'].get('nomigrate', 0) else: nomigrate = dominfo.info['platform'].get('nomigrate') if nomigrate is None: nomigrate = 0 if int(nomigrate) != 0: dominfo.destroy() raise XendError("cannot restore non-migratable domain") store_port = dominfo.getStorePort() console_port = dominfo.getConsolePort() assert store_port assert console_port # if hvm, pass mem size to calculate the store_mfn if is_hvm: apic = int(dominfo.info['platform'].get('apic', 0)) pae = int(dominfo.info['platform'].get('pae', 0)) log.info("restore hvm domain %d, apic=%d, pae=%d", dominfo.domid, apic, pae) else: apic = 0 pae = 0 try: restore_image = image.create(dominfo, dominfo.info) memory = restore_image.getRequiredAvailableMemory( dominfo.info['memory_dynamic_max'] / 1024) maxmem = restore_image.getRequiredAvailableMemory( dominfo.info['memory_static_max'] / 1024) shadow = restore_image.getRequiredShadowMemory( dominfo.info['shadow_memory'] * 1024, dominfo.info['memory_static_max'] / 1024) log.debug("restore:shadow=0x%x, _static_max=0x%x, _static_min=0x%x, ", dominfo.info['shadow_memory'], dominfo.info['memory_static_max'], dominfo.info['memory_static_min']) # Round shadow up to a multiple of a MiB, as shadow_mem_control # takes MiB and we must not round down and end up under-providing. shadow = ((shadow + 1023) / 1024) * 1024 # set memory limit xc.domain_setmaxmem(dominfo.getDomid(), maxmem) balloon.free(memory + shadow, dominfo) shadow_cur = xc.shadow_mem_control(dominfo.getDomid(), shadow / 1024) dominfo.info['shadow_memory'] = shadow_cur superpages = restore_image.superpages cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE), fd, dominfo.getDomid(), store_port, console_port, int(is_hvm), pae, apic, superpages]) log.debug("[xc_restore]: %s", string.join(cmd)) handler = RestoreInputHandler() forkHelper(cmd, fd, handler.handler, True) # We don't want to pass this fd to any other children -- we # might need to recover the disk space that backs it. try: flags = fcntl.fcntl(fd, fcntl.F_GETFD) flags |= fcntl.FD_CLOEXEC fcntl.fcntl(fd, fcntl.F_SETFD, flags) except: pass if handler.store_mfn is None: raise XendError('Could not read store MFN') if not is_hvm and handler.console_mfn is None: raise XendError('Could not read console MFN') restore_image.setCpuid() # xc_restore will wait for source to close connection dominfo.completeRestore(handler.store_mfn, handler.console_mfn) # # We shouldn't hold the domains_lock over a waitForDevices # As this function sometime gets called holding this lock, # we must release it and re-acquire it appropriately # from xen.xend import XendDomain lock = True; try: XendDomain.instance().domains_lock.release() except: lock = False; try: dominfo.waitForDevices() # Wait for backends to set up finally: if lock: XendDomain.instance().domains_lock.acquire() if not paused: dominfo.unpause() return dominfo except Exception, exn: dominfo.destroy() log.exception(exn) raise exn
|
return "d%d" %(self,disk,)
|
return "d%d" %(self.disk,)
|
def __repr__(self): if self.part is not None: return "d%dp%d" %(self.disk, self.part) else: return "d%d" %(self,disk,)
|
if target is not None and int(target) is self.domid :
|
if target is not None and int(target) == self.domid:
|
def getStubdomDomid(self): dom_list = xstransact.List('/local/domain') for d in dom_list: target = xstransact.Read('/local/domain/' + d + '/target') if target is not None and int(target) is self.domid : return int(d) return None
|
ret = ret + ['-vcpu_avail', str(self.vm.getVCpuAvail())]
|
ret = ret + ['-vcpu_avail', hex(self.vm.getVCpuAvail())]
|
def parseDeviceModelArgs(self, vmConfig): ret = ImageHandler.parseDeviceModelArgs(self, vmConfig) ret = ret + ['-vcpus', str(self.vm.getVCpuCount())] ret = ret + ['-vcpu_avail', str(self.vm.getVCpuAvail())]
|
def format_comment(level, comment): indent = reduce(lambda x,y: x + " ", range(level), "") s = "%s/*\n" % indent s += "%s * " % indent comment = comment.replace("\n", "\n%s * " % indent) x = re.compile(r'^%s \* $' % indent, re.MULTILINE) comment = x.sub("%s *" % indent, comment) s += comment s += "\n" s += "%s */" % indent s += "\n" return s
|
class Builtin(Type): """Builtin type""" def __init__(self, typename, **kwargs): Type.__init__(self, typename, **kwargs)
|
def format_comment(level, comment): indent = reduce(lambda x,y: x + " ", range(level), "") s = "%s/*\n" % indent s += "%s * " % indent comment = comment.replace("\n", "\n%s * " % indent) x = re.compile(r'^%s \* $' % indent, re.MULTILINE) comment = x.sub("%s *" % indent, comment) s += comment s += "\n" s += "%s */" % indent s += "\n" return s
|
def libxl_C_type_of(ty): return ty.typename
|
class UInt(Type): def __init__(self, w, **kwargs): kwargs.setdefault('namespace', None) Type.__init__(self, "uint%d_t" % w, **kwargs)
|
def libxl_C_type_of(ty): return ty.typename
|
def libxl_C_instance_of(ty, instancename): if isinstance(ty, libxltypes.BitField): return libxl_C_type_of(ty) + " " + instancename + ":%d" % ty.width elif isinstance(ty, libxltypes.Aggregate) and ty.typename is None: if instancename is None: return libxl_C_type_define(ty) else: return libxl_C_type_define(ty) + " " + instancename else: return libxl_C_type_of(ty) + " " + instancename
|
self.width = w
|
def libxl_C_instance_of(ty, instancename): if isinstance(ty, libxltypes.BitField): return libxl_C_type_of(ty) + " " + instancename + ":%d" % ty.width elif isinstance(ty, libxltypes.Aggregate) and ty.typename is None: if instancename is None: return libxl_C_type_define(ty) else: return libxl_C_type_define(ty) + " " + instancename else: return libxl_C_type_of(ty) + " " + instancename
|
def libxl_C_type_define(ty, indent = ""): s = "" if isinstance(ty, libxltypes.Aggregate): if ty.comment is not None: s += format_comment(0, ty.comment)
|
class BitField(Type): def __init__(self, ty, w, **kwargs): kwargs.setdefault('namespace', None) Type.__init__(self, ty.typename, **kwargs)
|
def libxl_C_type_define(ty, indent = ""): s = "" if isinstance(ty, libxltypes.Aggregate): if ty.comment is not None: s += format_comment(0, ty.comment) if ty.typename is None: s += "%s {\n" % ty.kind else: s += "typedef %s {\n" % ty.kind for f in ty.fields: if f.comment is not None: s += format_comment(4, f.comment) x = libxl_C_instance_of(f.type, f.name) if f.const: x = "const " + x x = x.replace("\n", "\n ") s += " " + x + ";\n" if ty.typename is None: s += "}" else: s += "} %s" % ty.typename else: raise NotImplementedError("%s" % type(ty)) return s.replace("\n", "\n%s" % indent)
|
if ty.typename is None: s += "%s {\n" % ty.kind else: s += "typedef %s {\n" % ty.kind
|
self.width = w
|
def libxl_C_type_define(ty, indent = ""): s = "" if isinstance(ty, libxltypes.Aggregate): if ty.comment is not None: s += format_comment(0, ty.comment) if ty.typename is None: s += "%s {\n" % ty.kind else: s += "typedef %s {\n" % ty.kind for f in ty.fields: if f.comment is not None: s += format_comment(4, f.comment) x = libxl_C_instance_of(f.type, f.name) if f.const: x = "const " + x x = x.replace("\n", "\n ") s += " " + x + ";\n" if ty.typename is None: s += "}" else: s += "} %s" % ty.typename else: raise NotImplementedError("%s" % type(ty)) return s.replace("\n", "\n%s" % indent)
|
for f in ty.fields: if f.comment is not None: s += format_comment(4, f.comment) x = libxl_C_instance_of(f.type, f.name) if f.const: x = "const " + x x = x.replace("\n", "\n ") s += " " + x + ";\n" if ty.typename is None: s += "}" else: s += "} %s" % ty.typename else: raise NotImplementedError("%s" % type(ty)) return s.replace("\n", "\n%s" % indent)
|
class Field(object): """An element of an Aggregate type""" def __init__(self, type, name, **kwargs): self.type = type self.name = name self.const = kwargs.setdefault('const', False) self.comment = kwargs.setdefault('comment', None) self.keyvar_expr = kwargs.setdefault('keyvar_expr', None)
|
s += "typedef %s {\n" % ty.kind
|
if __name__ == '__main__': if len(sys.argv) < 3: print >>sys.stderr, "Usage: gentypes.py <idl> <header>" sys.exit(1)
|
class Aggregate(Type): """A type containing a collection of other types""" def __init__(self, kind, typename, fields, **kwargs): Type.__init__(self, typename, **kwargs)
|
s += "typedef %s {\n" % ty.kind
|
idl = sys.argv[1] (_,types) = libxltypes.parse(idl) header = sys.argv[2] print "outputting libxl types to %s" % header
|
self.kind = kind
|
s += "typedef %s {\n" % ty.kind
|
f = open(header, "w") f.write("""
|
self.fields = [] for f in fields: if len(f) == 2: n,t = f const = False comment = None elif len(f) == 3: n,t,const = f comment = None else: n,t,const,comment = f self.fields.append(Field(t,n,const=const,comment=comment))
|
s += "typedef %s {\n" % ty.kind
|
/* * DO NOT EDIT. * * This file is autogenerated by * "%s" */ """ % " ".join(sys.argv)) for t in types: f.write(libxl_C_type_define(t) + ";\n") f.write("\n")
|
class Struct(Aggregate): def __init__(self, name, fields, **kwargs): Aggregate.__init__(self, "struct", name, fields, **kwargs)
|
f.write("""#ifndef __LIBXL_TYPES_H
|
f.write("""
|
class Union(Aggregate): def __init__(self, name, fields, **kwargs): Aggregate.__init__(self, "union", name, fields, **kwargs) class KeyedUnion(Aggregate): """A union which is keyed of another variable in the parent structure""" def __init__(self, name, keyvar_name, fields, **kwargs): Aggregate.__init__(self, "union", name, [], **kwargs) self.keyvar_name = keyvar_name for f in fields: n, kve, ty = f self.fields.append(Field(ty, n, keyvar_expr=kve)) class Reference(Type): """A reference to another type""" def __init__(self, ty, **kwargs): kwargs.setdefault('namespace', ty.namespace) typename = ty.typename[len(kwargs['namespace']):] Type.__init__(self, typename + " *", **kwargs) void = Builtin("void *", namespace = None) bool = Builtin("bool", namespace = None) size_t = Builtin("size_t", namespace = None) integer = Builtin("int", namespace = None) unsigned_integer = Builtin("unsigned int", namespace = None) unsigned = Builtin("unsigned int", namespace = None) unsigned_long = Builtin("unsigned long", namespace = None) uint8 = UInt(8) uint16 = UInt(16) uint32 = UInt(32) uint64 = UInt(64) domid = UInt(32) string = Builtin("char *", namespace = None) inaddr_ip = Builtin("struct in_addr", namespace = None) class OrderedDict(dict): """A dictionary which remembers insertion order. push to back on duplicate insertion""" def __init__(self): dict.__init__(self) self.__ordered = [] def __setitem__(self, key, value): try: self.__ordered.remove(key) except ValueError: pass self.__ordered.append(key) dict.__setitem__(self, key, value) def ordered_keys(self): return self.__ordered def ordered_values(self): return [self[x] for x in self.__ordered] def ordered_items(self): return [(x,self[x]) for x in self.__ordered] def parse(f): print >>sys.stderr, "Parsing %s" % f globs = {} locs = OrderedDict() for n,t in globals().items(): if isinstance(t, Type): globs[n] = t elif isinstance(t,type(object)) and issubclass(t, Type): globs[n] = t try: execfile(f, globs, locs) except SyntaxError,e: raise SyntaxError, \ "Errors were found at line %d while processing %s:\n\t%s"\ %(e.lineno,f,e.text) types = [t for t in locs.ordered_values() if isinstance(t,Type)] builtins = [t for t in types if isinstance(t,Builtin)] types = [t for t in types if not isinstance(t,Builtin)] return (builtins,types)
|
f.write("""#ifndef __LIBXL_TYPES_H
|
for ty in [t for t in types if t.autogenerate_destructor]:
|
for ty in [t for t in types if t.destructor_fn is not None and t.autogenerate_destructor]:
|
f.write("""#ifndef __LIBXL_TYPES_H
|
if vmConfig['platform'].get('localtime', 0):
|
if int(vmConfig['platform'].get('localtime', 0)):
|
def configure(self, vmConfig): """Config actions common to all unix-like domains.""" if '_temp_using_bootloader' in vmConfig: self.bootloader = True self.kernel = vmConfig['_temp_kernel'] self.cmdline = vmConfig['_temp_args'] self.ramdisk = vmConfig['_temp_ramdisk'] else: self.kernel = vmConfig['PV_kernel'] self.cmdline = vmConfig['PV_args'] self.ramdisk = vmConfig['PV_ramdisk'] # There a code-paths where use_tmp_xxx is not set at all; but if # this is set, the variable itself is a boolean. if 'use_tmp_kernel' in vmConfig and vmConfig['use_tmp_kernel']: self.use_tmp_kernel = True if 'use_tmp_ramdisk' in vmConfig and vmConfig['use_tmp_ramdisk']: self.use_tmp_ramdisk = True self.vm.storeVm(("image/ostype", self.ostype), ("image/kernel", self.kernel), ("image/cmdline", self.cmdline), ("image/ramdisk", self.ramdisk)) self.vm.permissionsVm("image/cmdline", { 'dom': self.vm.getDomid(), 'read': True } )
|
dpci_uuid = pci_dev_info.get('uuid', uuid.createString()) pci_dev_info['uuid'] = dpci_uuid
|
pci_dev_info['uuid'] = uuid.createString() if not pci_dev_info.has_key('key'): pci_dev_info['key'] = "%02x:%02x.%x" % \ (int(pci_dev_info['bus'], 16), int(pci_dev_info['slot'], 16), int(pci_dev_info['func'], 16)) if not pci_dev_info.has_key('vdevfn'): pci_dev_info['vdevfn'] = "0x%02x" % AUTO_PHP_SLOT
|
def pci_convert_sxp_to_dict(dev_sxp): """Convert pci device sxp to dict @param dev_sxp: device configuration @type dev_sxp: SXP object (parsed config) @return: dev_config @rtype: dictionary """ # Parsing the device SXP's. In most cases, the SXP looks # like this: # # [device, [vif, [mac, xx:xx:xx:xx:xx:xx], [ip 1.3.4.5]]] # # However, for PCI devices it looks like this: # # [device, [pci, [dev, [domain, 0], [bus, 0], [slot, 1], [func, 2]]] # # It seems the reasoning for this difference is because # pciif.py needs all the PCI device configurations at # the same time when creating the devices. # # To further complicate matters, Xen 2.0 configuration format # uses the following for pci device configuration: # # [device, [pci, [domain, 0], [bus, 0], [dev, 1], [func, 2]]] # For PCI device hotplug support, the SXP of PCI devices is # extendend like this: # # [device, [pci, [dev, [domain, 0], [bus, 0], [slot, 1], [func, 2], # [vdevfn, 0]], # [state, 'Initialising']]] # # 'vdevfn' shows the virtual hotplug slot number which the PCI device # is inserted in. This is only effective for HVM domains. # # state 'Initialising' indicates that the device is being attached, # while state 'Closing' indicates that the device is being detached. # # The Dict looks like this: # # { devs: [{domain: 0, bus: 0, slot: 1, func: 2, vdevfn: 0}], # states: ['Initialising'] } dev_config = {} pci_devs = [] for pci_dev in sxp.children(dev_sxp, 'dev'): pci_dev_info = dict(pci_dev[1:]) if 'opts' in pci_dev_info: pci_dev_info['opts'] = pci_opts_list_from_sxp(pci_dev) # append uuid to each pci device that does't already have one. if not pci_dev_info.has_key('uuid'): dpci_uuid = pci_dev_info.get('uuid', uuid.createString()) pci_dev_info['uuid'] = dpci_uuid pci_devs.append(pci_dev_info) dev_config['devs'] = pci_devs pci_states = [] for pci_state in sxp.children(dev_sxp, 'state'): try: pci_states.append(pci_state[1]) except IndexError: raise XendError("Error reading state while parsing pci sxp") dev_config['states'] = pci_states return dev_config
|
title_match = re.match('^menuentry "(.*)" (.*){', l)
|
title_match = re.match('^menuentry ["\'](.*)["\'] (.*){', l)
|
def parse(self, buf = None): if buf is None: if self.filename is None: raise ValueError, "No config file defined to parse!"
|
dom0._waitForDeviceUUID(dom0.create_vbd(vbd, disk))
|
mounted_vbd_uuid = dom0.create_vbd(vbd, disk); dom0._waitForDeviceUUID(mounted_vbd_uuid)
|
def _configureBootloader(self): """Run the bootloader if we're configured to do so."""
|
dom0.destroyDevice('tap', BOOTLOADER_LOOPBACK_DEVICE)
|
_, vbd_info = dom0.info['devices'][mounted_vbd_uuid] dom0.destroyDevice(dom0.getBlockDeviceClass(vbd_info['devid']), BOOTLOADER_LOOPBACK_DEVICE, force = True)
|
def _configureBootloader(self): """Run the bootloader if we're configured to do so."""
|
self.domains_lock.acquire() try: self._refresh(refresh_shutdown = False)
|
resu = False count = 0 while True: resu = self.domains_lock.acquire(0) if resu or count < 20: break count += 1 try: if resu: self._refresh(refresh_shutdown = False)
|
def list(self, state = DOM_STATE_RUNNING): """Get list of domain objects.
|
self.domains_lock.release()
|
if resu: self.domains_lock.release()
|
def list(self, state = DOM_STATE_RUNNING): """Get list of domain objects.
|
nr_nodes = info['max_node_id']+1
|
nr_nodes = info['max_node_index'] + 1
|
def find_relaxed_node(node_list): import sys nr_nodes = info['max_node_id']+1 if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): if len(info['node_to_cpu'][i]) == 0: nodeload[i] += 8 else: nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
node_cpumask = info['node_to_cpu'][i]
|
node_cpumask = node_to_cpu[i]
|
def find_relaxed_node(node_list): import sys nr_nodes = info['max_node_id']+1 if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): if len(info['node_to_cpu'][i]) == 0: nodeload[i] += 8 else: nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
if len(info['node_to_cpu'][i]) == 0:
|
if len(node_to_cpu[i]) == 0:
|
def find_relaxed_node(node_list): import sys nr_nodes = info['max_node_id']+1 if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): if len(info['node_to_cpu'][i]) == 0: nodeload[i] += 8 else: nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i]))
|
nodeload[i] = int(nodeload[i] * 16 / len(node_to_cpu[i]))
|
def find_relaxed_node(node_list): import sys nr_nodes = info['max_node_id']+1 if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): if len(info['node_to_cpu'][i]) == 0: nodeload[i] += 8 else: nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
info = xc.physinfo() if info['nr_nodes'] > 1: node_memory_list = info['node_to_memory']
|
info = xc.numainfo() if info['max_node_index'] > 0: node_memory_list = info['node_memfree'] node_to_cpu = [] for i in range(0, info['max_node_index'] + 1): node_to_cpu.append([]) for cpu, node in enumerate(xc.topologyinfo()['cpu_to_node']): node_to_cpu[node].append(cpu)
|
def find_relaxed_node(node_list): import sys nr_nodes = info['max_node_id']+1 if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): if len(info['node_to_cpu'][i]) == 0: nodeload[i] += 8 else: nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
for i in range(0, info['max_node_id']+1): if node_memory_list[i] >= needmem and len(info['node_to_cpu'][i]) > 0:
|
for i in range(0, info['max_node_index'] + 1): if node_memory_list[i] >= needmem and len(node_to_cpu[i]) > 0:
|
def find_relaxed_node(node_list): import sys nr_nodes = info['max_node_id']+1 if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): if len(info['node_to_cpu'][i]) == 0: nodeload[i] += 8 else: nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
cpumask = info['node_to_cpu'][best_node] best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['max_node_id']+1)))
|
cpumask = node_to_cpu[best_node] best_nodes = find_relaxed_node(filter(lambda x: x != best_node, range(0,info['max_node_index']+1)))
|
def find_relaxed_node(node_list): import sys nr_nodes = info['max_node_id']+1 if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): if len(info['node_to_cpu'][i]) == 0: nodeload[i] += 8 else: nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
cpumask = cpumask + info['node_to_cpu'][node_idx]
|
cpumask = cpumask + node_to_cpu[node_idx]
|
def find_relaxed_node(node_list): import sys nr_nodes = info['max_node_id']+1 if node_list is None: node_list = range(0, nr_nodes) nodeload = [0] nodeload = nodeload * nr_nodes from xen.xend import XendDomain doms = XendDomain.instance().list('all') for dom in filter (lambda d: d.domid != self.domid, doms): cpuinfo = dom.getVCPUInfo() for vcpu in sxp.children(cpuinfo, 'vcpu'): if sxp.child_value(vcpu, 'online') == 0: continue cpumap = list(sxp.child_value(vcpu,'cpumap')) for i in range(0, nr_nodes): node_cpumask = info['node_to_cpu'][i] for j in node_cpumask: if j in cpumap: nodeload[i] += 1 break for i in range(0, nr_nodes): if len(info['node_to_cpu'][i]) == 0: nodeload[i] += 8 else: nodeload[i] = int(nodeload[i] * 16 / len(info['node_to_cpu'][i])) if i not in node_list: nodeload[i] += 8 return map(lambda x: x[0], sorted(enumerate(nodeload), key=lambda x:x[1]))
|
sys.exit(1)
|
usage(name)
|
def arg_check_for_resource_list(args, name): use_long = 0 try: (options, params) = getopt.gnu_getopt(args, 'l', ['long']) except getopt.GetoptError, opterr: err(opterr) sys.exit(1) for (k, v) in options: if k in ['-l', '--long']: use_long = 1 if len(params) == 0: print 'No domain parameter given' usage(name) if len(params) > 1: print 'No multiple domain parameters allowed' usage(name) return (use_long, params)
|
self.pci_perm_dev_config = pci_perm_dev_config
|
def __devIsUnconstrained( self ): if os.path.exists(PERMISSIVE_CONFIG_FILE): try: fin = file(PERMISSIVE_CONFIG_FILE, 'rb') try: pci_perm_dev_config = parse(fin) finally: fin.close() if pci_perm_dev_config is None: pci_perm_dev_config = [''] else: pci_perm_dev_config.insert(0, '') self.pci_perm_dev_config = pci_perm_dev_config except Exception, ex: raise XendError("Reading config file %s: %s" % (PERMISSIVE_CONFIG_FILE,str(ex))) else: log.info("Config file does not exist: %s" % PERMISSIVE_CONFIG_FILE) self.pci_perm_dev_config = ['xend-pci-perm-devs']
|
|
self.pci_perm_dev_config = ['xend-pci-perm-devs']
|
pci_perm_dev_config = ['xend-pci-perm-devs']
|
def __devIsUnconstrained( self ): if os.path.exists(PERMISSIVE_CONFIG_FILE): try: fin = file(PERMISSIVE_CONFIG_FILE, 'rb') try: pci_perm_dev_config = parse(fin) finally: fin.close() if pci_perm_dev_config is None: pci_perm_dev_config = [''] else: pci_perm_dev_config.insert(0, '') self.pci_perm_dev_config = pci_perm_dev_config except Exception, ex: raise XendError("Reading config file %s: %s" % (PERMISSIVE_CONFIG_FILE,str(ex))) else: log.info("Config file does not exist: %s" % PERMISSIVE_CONFIG_FILE) self.pci_perm_dev_config = ['xend-pci-perm-devs']
|
cfg["vcpus_params"]["weight"] = \ int(sxp.child_value(sxp_cfg, "cpu_weight", 256)) cfg["vcpus_params"]["cap"] = \ int(sxp.child_value(sxp_cfg, "cpu_cap", 0))
|
if not cfg["vcpus_params"].has_key("weight"): cfg["vcpus_params"]["weight"] = \ int(sxp.child_value(sxp_cfg, "cpu_weight", 256)) if not cfg["vcpus_params"].has_key("cap"): cfg["vcpus_params"]["cap"] = \ int(sxp.child_value(sxp_cfg, "cpu_cap", 0))
|
def _parse_sxp(self, sxp_cfg): """ Populate this XendConfig using the parsed SXP.
|
fake_token = encode_token(str(self.test_user.id), str('asdf;ljasdf'), reverse('urlcrypt_test_view'))
|
fake_token = encode_token([str(self.test_user.id), reverse('urlcrypt_test_view'), str(int(time.time()))], secret_key_f)
|
def test_login_token_failed_hax0r(self): fake_token = 'asdf;lhasdfdso' response = self.client.get(reverse('urlcrypt_redirect', args=(fake_token,))) self.assertRedirects(response, URLCRYPT_LOGIN_URL) fake_token = encode_token(str(self.test_user.id), str('asdf;ljasdf'), reverse('urlcrypt_test_view')) response = self.client.get(reverse('urlcrypt_redirect', args=(fake_token,))) self.assertRedirects(response, URLCRYPT_LOGIN_URL)
|
Only data starting from July 1st, 14:29 UTC is processed, since that is when our detector was turned on.
|
def process_events(filename, table, start=None, stop=None): """Do the actual data processing. This function starts a subprocess to unzip the data file, reads the data line by line and stores it in a pytables table, row by row. Only data starting from July 1st, 14:29 UTC is processed, since that is when our detector was turned on. :param filename: the KASCADE data filename :param table: the destination table """ f = gzip.open(filename) tablerow = table.row while True: # read a line from the subprocess stdout buffer line = f.readline() if not line: # no more lines left, EOF break # break up the line into an array of floats data = line.split(' ') data = [float(x) for x in data] # read all columns into KASCADE-named variables Irun, Ieve, Gt, Mmn, EnergyArray, Xc, Yc, Ze, Az, Size, Nmu, He0, \ Hmu0, He1, Hmu1, He2, Hmu2, He3, Hmu3, P200, T200 = data # if start and stop are specified, the following boils down to: # start < Gt < stop # but also take start is None and/or stop is None into # consideration if (start is None or start <= Gt) and (stop is None or Gt < stop): # condition matched, so process event # construct a pytables table row of all the data... tablerow['run_id'] = Irun tablerow['event_id'] = Ieve tablerow['timestamp'] = Gt tablerow['nanoseconds'] = Mmn tablerow['ext_timestamp'] = Gt * 1e9 + Mmn tablerow['energy'] = EnergyArray tablerow['core_pos'] = [Xc, Yc] tablerow['zenith'] = Ze tablerow['azimuth'] = Az tablerow['Num_e'] = Size tablerow['Num_mu'] = Nmu tablerow['dens_e'] = [He0, He1, He2, He3] tablerow['dens_mu'] = [Hmu0, Hmu1, Hmu2, Hmu3] tablerow['P200'] = P200 tablerow['T200'] = T200 # ...and store it tablerow.append() elif stop is not None and stop < Gt: # timestamp is after explicitly specified stop time, so no need # to process the rest of the data break # flush the table buffers and write them to disk table.flush()
|
|
Hmu0, He1, Hmu1, He2, Hmu2, He3, Hmu3, P200, T200 = data
|
Hmu0, He1, Hmu1, He2, Hmu2, He3, Hmu3, T200, P200 = data
|
def process_events(filename, table, start=None, stop=None): """Do the actual data processing. This function starts a subprocess to unzip the data file, reads the data line by line and stores it in a pytables table, row by row. Only data starting from July 1st, 14:29 UTC is processed, since that is when our detector was turned on. :param filename: the KASCADE data filename :param table: the destination table """ f = gzip.open(filename) tablerow = table.row while True: # read a line from the subprocess stdout buffer line = f.readline() if not line: # no more lines left, EOF break # break up the line into an array of floats data = line.split(' ') data = [float(x) for x in data] # read all columns into KASCADE-named variables Irun, Ieve, Gt, Mmn, EnergyArray, Xc, Yc, Ze, Az, Size, Nmu, He0, \ Hmu0, He1, Hmu1, He2, Hmu2, He3, Hmu3, P200, T200 = data # if start and stop are specified, the following boils down to: # start < Gt < stop # but also take start is None and/or stop is None into # consideration if (start is None or start <= Gt) and (stop is None or Gt < stop): # condition matched, so process event # construct a pytables table row of all the data... tablerow['run_id'] = Irun tablerow['event_id'] = Ieve tablerow['timestamp'] = Gt tablerow['nanoseconds'] = Mmn tablerow['ext_timestamp'] = Gt * 1e9 + Mmn tablerow['energy'] = EnergyArray tablerow['core_pos'] = [Xc, Yc] tablerow['zenith'] = Ze tablerow['azimuth'] = Az tablerow['Num_e'] = Size tablerow['Num_mu'] = Nmu tablerow['dens_e'] = [He0, He1, He2, He3] tablerow['dens_mu'] = [Hmu0, Hmu1, Hmu2, Hmu3] tablerow['P200'] = P200 tablerow['T200'] = T200 # ...and store it tablerow.append() elif stop is not None and stop < Gt: # timestamp is after explicitly specified stop time, so no need # to process the rest of the data break # flush the table buffers and write them to disk table.flush()
|
pylab.hist(dt, bins=100, range=(-1, 1), histtype='step', label="Shift %+g s" % shift)
|
try: pylab.hist(dt, bins=100, range=(-1, 1), histtype='step', label="Shift %+g s" % shift) except NameError: pass
|
def do_timeshifts(hevents, kevents, shifts, dtlimit=None, limit=None, h=None, k=None): """Search for coincidences using multiple time shifts This function enables you to search for coincidences multiple times, using a list of time shifts. Given a data file, the events are read into arrays and passed on to the search_coincidences function. For each shift, a histogram is plotted so you can get a feel for the goodness of the shift. The coincidences data from the last shift is returned. :param hevents: hisparc event table :param kevents: kascade event table :param shifts: a list of time shifts :param dtlimit: limit on the time difference between hisparc and kascade events in nanoseconds. If this limit is exceeded, coincidences are not stored. Default: None. :param limit: an optional limit on the number of kascade events used in the search :param h: prefetched array from hisparc table (optional) :param k: prefetched array from kascade table (optional) :return: An array of coincidences from the last shift ([dt in nanoseconds, hisparc event id, kascade event id]). """ # Get arrays from the tables. This is much, much faster than working # from the tables directly. Pity. if not h or not k: h, k = get_arrays_from_tables(hevents, kevents, limit) for shift in shifts: print "Calculating dt's for timeshift %.9f (%d nanoseconds)" % \ (shift, long(shift * 1e9)) coincidences = search_coincidences(h, k, shift, dtlimit) dt = [x[0] / 1e9 for x in coincidences] pylab.hist(dt, bins=100, range=(-1, 1), histtype='step', label="Shift %+g s" % shift) finish_graph() return coincidences
|
pylab.legend() pylab.xlabel("Time difference (s)") pylab.ylabel("Counts") pylab.title("Nearest neighbour events for HiSPARC / KASCADE") pylab.gca().axis('auto') pylab.gcf().show()
|
try: pylab.legend() pylab.xlabel("Time difference (s)") pylab.ylabel("Counts") pylab.title("Nearest neighbour events for HiSPARC / KASCADE") pylab.gca().axis('auto') pylab.gcf().show() except NameError: print "Unfortunately, the pylab interface was not available." print "No graphs at this point."
|
def finish_graph(): """Finish the histogram This function places a legend, axes titles and the like on the current figure. """ pylab.legend() pylab.xlabel("Time difference (s)") pylab.ylabel("Counts") pylab.title("Nearest neighbour events for HiSPARC / KASCADE") pylab.gca().axis('auto') pylab.gcf().show()
|
cond = '%d <= timestamp <= %d' % \
|
cond = '(%d <= timestamp) & (timestamp <= %d)' % \
|
def store_data(dst_file, dst_group, src_filename, t0, t1): """Copy data from a temporary file to the destination file This function takes a file containing downloaded data and copies it to the destination file, based on start and end timestamps. This can be further optimized at the expense of spaghetti code. """ # Open in rw mode, need to update blob idxs, if necessary with tables.openFile(src_filename, 'a') as src_file: src_group = src_file.listNodes('/')[0] dst_group = get_or_create_group(dst_file, dst_group) if 'blobs' in dst_group: len_blobs = len(dst_file.getNode(dst_group, 'blobs')) else: len_blobs = 0 for node in src_file.listNodes(src_group): dst_node = get_or_create_node(dst_file, dst_group, node) if node.name == 'blobs': for row in node: dst_node.append(row) elif node.name == 'events': if not t1: cond = 'timestamp >= %d' % \ calendar.timegm(t0.utctimetuple()) else: cond = '%d <= timestamp <= %d' % \ (calendar.timegm(t0.utctimetuple()), calendar.timegm(t1.utctimetuple())) if len_blobs: for row in node.readWhere(cond): row['traces'] += len_blobs dst_node.append([tuple(row)]) else: for row in node.readWhere(cond): dst_node.append([tuple(row)]) elif node.name == 'errors' and len_blobs: for row in node: row['messages'] += len_blobs dst_node.append([row[:]]) elif node.name == 'config' and len_blobs: for row in node: row['mas_version'] += len_blobs row['slv_version'] += len_blobs row['password'] += len_blobs row['buffer'] += len_blobs dst_node.append([row[:]]) else: rows = node.read() dst_node.append(rows) os.remove(src_filename)
|
def deg_to_rad(*args): """Take an optional amount of degree values and replace with radians. INPUT *args: Any number of arguments of ints, floats, and arrays OUTPUT new_values: Original inputs converted to radians """
|
def deg_rad(conversion, *args): """Take an optional amount of values and convert between degrees/radians. INPUT conversion: (str) desired output, either 'degrees' or 'radians' *args: Will accept ints, floats, and ndarrrays (from NumPy) OUTPUT results: Original inputs converted to radians or degrees """ if conversion == "radians": factor = (numpy.pi / 180.) else: factor = (180. / numpy.pi)
|
def deg_to_rad(*args): """Take an optional amount of degree values and replace with radians. INPUT *args: Any number of arguments of ints, floats, and arrays OUTPUT new_values: Original inputs converted to radians """ results = [] ## Iterate through each variable for arg in args: try: iterable = iter(arg) except TypeError: ## not iterable, just convert it arg = float(arg) * (numpy.pi / 180.) else: ## iterate over each index in possibly multi-d object for index in range(len(arg.flat)): arg.flat[index] = float(arg.flat[index]) * (numpy.pi / 180.) ## Add arg to results results.append(arg) return results
|
arg = float(arg) * (numpy.pi / 180.)
|
arg = float(arg) * factor
|
def deg_to_rad(*args): """Take an optional amount of degree values and replace with radians. INPUT *args: Any number of arguments of ints, floats, and arrays OUTPUT new_values: Original inputs converted to radians """ results = [] ## Iterate through each variable for arg in args: try: iterable = iter(arg) except TypeError: ## not iterable, just convert it arg = float(arg) * (numpy.pi / 180.) else: ## iterate over each index in possibly multi-d object for index in range(len(arg.flat)): arg.flat[index] = float(arg.flat[index]) * (numpy.pi / 180.) ## Add arg to results results.append(arg) return results
|
arg.flat[index] = float(arg.flat[index]) * (numpy.pi / 180.)
|
arg.flat[index] = float(arg.flat[index]) * factor
|
def deg_to_rad(*args): """Take an optional amount of degree values and replace with radians. INPUT *args: Any number of arguments of ints, floats, and arrays OUTPUT new_values: Original inputs converted to radians """ results = [] ## Iterate through each variable for arg in args: try: iterable = iter(arg) except TypeError: ## not iterable, just convert it arg = float(arg) * (numpy.pi / 180.) else: ## iterate over each index in possibly multi-d object for index in range(len(arg.flat)): arg.flat[index] = float(arg.flat[index]) * (numpy.pi / 180.) ## Add arg to results results.append(arg) return results
|
pitch_0, rct_matrix[:,2] = deg_rad("degrees", pitch_0, rct_matrix[:,2])
|
def rotor_analysis(rct_matrix, tip_speed_ratio, number_blades, pitch_0, blade_radius, hub_radius, lift_curve, drag_curve, method): """Returns performance statistics of a rotor. INPUT tip_speed_ratio: (float) The tip speed ratio number_blades: (int) the number of blades pitch_0 : (float) initial pitch angle relative to tip, deg blade_radius: (float) radius in meters hub_radius: (float) hub radius in meters lift_curve: (array-like) either linear slope and intercept or emperical C_l vs. AoA points drag_curve: (array-like) either linear slope and intercept or emperical C_d vs. C_l points rct_matrix: (numpy.ndarray) 3 x n array of fradius, chord, twist on each line fradius: (float) nondimensional fractional radius along blade chord: (float) nondimensional length twist: (float) in degrees OUTPUT rotor_stats: (ndarray) 7 x n, of the following angle_of_attack: (float) estimated angle of attack in degrees angle_of_rwind: (float) estimated angle of relative wind in degrees lift_coef: (float) linear approximation of lift coefficient drag_coef: (float) linear approximation of drag coefficient axial_induc_factor: (float) angular_induc_factor: (float) tip_loss_factor: (float) local_power_coef: (float) local power coefficient """ ## Convert all degrees to radians rotor_stats = [] ## Loop over each station for j in range(len(rct_matrix)): ## Calculate method-independent station characteristics local_radius = rct_matrix[j][0] * blade_radius local_chord = rct_matrix[j][1] local_tsr = tip_speed_ratio * rct_matrix[j][0] local_solidity = number_blades * local_chord / (2 * numpy.pi * local_radius) local_pitch = rct_matrix[j][2] + pitch_0 ## Calculate method dependent characteristics if method == "linear": (local_tip_loss, angle_of_attack, angle_of_rwind, lift_coef, drag_coef, axial_induc_factor, angular_induc_factor) =\ linear_method_factors(rct_matrix[j][0], number_blades, local_pitch, local_tsr, lift_curve[0], lift_curve[1], drag_curve[0], drag_curve[1], local_solidity) else: (local_tip_loss, angle_of_attack, angle_of_rwind, lift_coef, drag_coef, axial_induc_factor, angular_induc_factor) = \ nonlinear_method_factors(rct_matrix[j][0], number_blades, local_pitch, local_tsr, lift_curve, drag_curve, local_solidity) ## Calculate local thrust, torque, and power coefficients ## from method dependent results local_thrust, local_torque, local_power_coef = \ rotor_coefs(axial_induc_factor,angular_induc_factor, angle_of_rwind, tip_speed_ratio, local_tsr, len(rct_matrix), local_solidity, lift_coef, drag_coef, local_tip_loss) ## Add stats to results rotor_stats.append([local_radius, local_tip_loss, angle_of_attack, angle_of_rwind, lift_coef, drag_coef, axial_induc_factor, angular_induc_factor, local_power_coef]) ## Convert back to degrees return rotor_stats
|
|
print "Joystick OK!", pygame.joystick.get_count();
|
def __init__(self,width,height): resolution=width,height
|
|
self.keymap=(3,7,1,5,6,8,0,2)
|
def __init__(self,width,height): resolution=width,height
|
|
self.keymap=(7,5,3,1,0,2,6,8)
|
def __init__(self,width,height): resolution=width,height
|
|
print "Button pressed", key, p.cell
|
def move_player(self,p):
|
|
self.screen=pygame.display.set_mode(resolution,pygame.FULLSCREEN)
|
self.screen=pygame.display.set_mode(resolution)
|
def __init__(self,width,height): resolution=width,height
|
return self.get("interface", None)
|
interf = self.get("interface", None) if isinstance(interf, str): return TypeNameInterfaceMap()[interf] else: return interf
|
def get_interface(self): """Gets the interface """ return self.get("interface", None)
|
if actor.block: status = True n = actor.get_nb_output() outputs = [i for i in range(n) if actor.get_output(i) is not None ] if not outputs: status = False return status
|
try: if actor.block: status = True n = actor.get_nb_output() outputs = [i for i in range(n) if actor.get_output(i) is not None ] if not outputs: status = False return status except: pass
|
def is_stopped(self, vid, actor): """ Return True if evaluation must be stop at this vertex """ if vid in self._evaluated: return True
|
return actor.block or vid in self._evaluated
|
stopped = False try: stopped = actor.block or vid in self._evaluated except: pass return stopped
|
def is_stopped(self, vid, actor): """ Return True if evaluation must be stop at this vertex """ return actor.block or vid in self._evaluated
|
Annotation.extend_ad_hoc_slots("htmlText", str, None)
|
Annotation.extend_ad_hoc_slots("textColor", list, None)
|
def initialise_standard_metadata(): """Declares the standard keys used by the Node structures. Called at the end of this file""" #we declare what are the node model ad hoc data we require: AbstractNode.extend_ad_hoc_slots("position", list, [0,0], "posx", "posy") Node.extend_ad_hoc_slots("userColor", list, None, "user_color") Node.extend_ad_hoc_slots("useUserColor", bool, True, "use_user_color", ) Annotation.extend_ad_hoc_slots("text", str, "", "txt") Annotation.extend_ad_hoc_slots("htmlText", str, None) Annotation.extend_ad_hoc_slots("rectP2", tuple, (-1,-1)) Annotation.extend_ad_hoc_slots("color", list, None) #we declare what are the node model ad hoc data we require: AbstractPort.extend_ad_hoc_slots("hide" ,bool, False) AbstractPort.extend_ad_hoc_slots("connectorPosition",list, [0,0])
|
except IndexError : e = None self.__eventQueue = None
|
except IndexError , ex: e = None self.__eventQueue = None
|
def call_notify(self, sender, event=None): """ Basic implementation call directly notify function Sub class can override this method to implement different call strategy (like signal slot) """ if sender == self and event == "PROCESS_QUEUE": if self.__deaf : self.__eventQueue = None elif len(self.__eventQueue) > 0 : e = self.__eventQueue.popleft() while e: self.notify(e[0], e[1]) try : e = self.__eventQueue.popleft() except IndexError : e = None self.__eventQueue = None
|
Annotation.extend_ad_hoc_slots("visualStyle", int, 0)
|
Annotation.extend_ad_hoc_slots("visualStyle", int, None)
|
def initialise_standard_metadata(): """Declares the standard keys used by the Node structures. Called at the end of this file""" #we declare what are the node model ad hoc data we require: AbstractNode.extend_ad_hoc_slots("position", list, [0,0], "posx", "posy") Node.extend_ad_hoc_slots("userColor", list, None, "user_color") Node.extend_ad_hoc_slots("useUserColor", bool, True, "use_user_color", ) Annotation.extend_ad_hoc_slots("text", str, "", "txt")
|
self._metas = {}
|
self._metaValues = {} self._metaTypes = {}
|
def __init__(self, slots=None): observer.Observed.__init__(self) self._metas = {}
|
self._metas = slots._metas.copy()
|
self._metaValues = slots._metaValues.copy() self._metaTypes = slots._metaTypes.copy()
|
def __init__(self, slots=None): observer.Observed.__init__(self) self._metas = {}
|
for name, v in slots.iteritems(): _type, _value = v self.add_metadata(name, _type ) self.set_metadata(name, _value)
|
for name, value in slots.iteritems(): if (isinstance(value, tuple) or isinstance(value, list)) and \ len(value) == 2 and isinstance(value[0],type): typ, val = value else: typ = type(value) if value is not None else None val = value self.add_metadata(name, typ ) self.set_metadata(name, val)
|
def __init__(self, slots=None): observer.Observed.__init__(self) self._metas = {}
|
if(not len(self._metas)): return "{}" stri = "{" for key, val in self._metas.iteritems(): stri = stri+ "\"" + key + "\" : [" + val[0].__name__ + ", " + repr(val[1]) + "]," stri = stri[:-1] + "}" return stri
|
if(not len(self._metaValues)): return "{}" keys = set(self._metaTypes)-set(self._metaValues) d = self._metaValues.copy() for k in keys: d[k] = None return repr(d)
|
def __repr__(self): if(not len(self._metas)): return "{}" stri = "{" for key, val in self._metas.iteritems(): stri = stri+ "\"" + key + "\" : [" + val[0].__name__ + ", " + repr(val[1]) + "]," stri = stri[:-1] + "}" return stri
|
return len(self._metas)
|
return len(self._metaTypes)
|
def __len__(self): return len(self._metas)
|
assert type(valType) == types.TypeType
|
def add_metadata(self, key, valType, notify=True): """Creates a new entry in the meta data registry. The data to set will be of the given 'type' type.""" assert type(valType) == types.TypeType
|
|
if key in self._metas :
|
if key == 'userColor': print key, valType if key in self._metaTypes :
|
def add_metadata(self, key, valType, notify=True): """Creates a new entry in the meta data registry. The data to set will be of the given 'type' type.""" assert type(valType) == types.TypeType
|
self._metas[key] = [valType, None]
|
self._metaTypes[key] = valType
|
def add_metadata(self, key, valType, notify=True): """Creates a new entry in the meta data registry. The data to set will be of the given 'type' type.""" assert type(valType) == types.TypeType
|
if( value == None ): return if key not in self._metas :
|
if value is None : return if key not in self._metaTypes :
|
def set_metadata(self, key, value, notify=True): """Sets the value of a meta data.""" if( value == None ): return if key not in self._metas : raise Exception("This key does not exist : " + key)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.