rem
stringlengths 0
322k
| add
stringlengths 0
2.05M
| context
stringlengths 8
228k
|
---|---|---|
model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc)
|
model.set(iter, 0, _("Keyboard layout: ") + "<b>%s</b>" % self.keyboard_layout_desc)
|
def show_overview(self): ''' build the summary page ''' model = gtk.TreeStore(str) top = model.append(None) model.set(top, 0, "Filesystem operations") top = model.append(None) model.set(top, 0, _("Localization")) iter = model.append(top) model.set(iter, 0, "Language: <b>%s</b>" % self.locale) iter = model.append(top) model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc) iter = model.append(top) model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc) top = model.append(None) model.set(top, 0, _("User settings")) username = self.wTree.get_widget("entry_username").get_text() realname = self.wTree.get_widget("entry_your_name").get_text() iter = model.append(top) model.set(iter, 0, "Real name: <b>%s</b>" % realname) iter = model.append(top) model.set(iter, 0, "Username: <b>%s</b>" % username) top = model.append(None) model.set(top, 0, _("System settings")) iter = model.append(top) model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text()) install_grub = self.wTree.get_widget("checkbutton_grub").get_active() grub_box = self.wTree.get_widget("combobox_grub") grub_active = grub_box.get_active() grub_model = grub_box.get_model() iter = model.append(top) if(install_grub): model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0]))) else: model.set(iter, 0, _("Do not install bootloader")) disks = self.wTree.get_widget("treeview_disks").get_model() for item in disks: if(item[2]): # format it iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1])))) if(item[3] is not None and item[3] is not ""): # mount point iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3])))) self.wTree.get_widget("treeview_overview").set_model(model)
|
model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc)
|
model.set(iter, 0, _("Keyboard model: ") + "<b>%s</b>" % self.keyboard_model_desc)
|
def show_overview(self): ''' build the summary page ''' model = gtk.TreeStore(str) top = model.append(None) model.set(top, 0, "Filesystem operations") top = model.append(None) model.set(top, 0, _("Localization")) iter = model.append(top) model.set(iter, 0, "Language: <b>%s</b>" % self.locale) iter = model.append(top) model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc) iter = model.append(top) model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc) top = model.append(None) model.set(top, 0, _("User settings")) username = self.wTree.get_widget("entry_username").get_text() realname = self.wTree.get_widget("entry_your_name").get_text() iter = model.append(top) model.set(iter, 0, "Real name: <b>%s</b>" % realname) iter = model.append(top) model.set(iter, 0, "Username: <b>%s</b>" % username) top = model.append(None) model.set(top, 0, _("System settings")) iter = model.append(top) model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text()) install_grub = self.wTree.get_widget("checkbutton_grub").get_active() grub_box = self.wTree.get_widget("combobox_grub") grub_active = grub_box.get_active() grub_model = grub_box.get_model() iter = model.append(top) if(install_grub): model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0]))) else: model.set(iter, 0, _("Do not install bootloader")) disks = self.wTree.get_widget("treeview_disks").get_model() for item in disks: if(item[2]): # format it iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1])))) if(item[3] is not None and item[3] is not ""): # mount point iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3])))) self.wTree.get_widget("treeview_overview").set_model(model)
|
model.set(iter, 0, "Real name: <b>%s</b>" % realname)
|
model.set(iter, 0, _("Real name: ") + "<b>%s</b>" % realname)
|
def show_overview(self): ''' build the summary page ''' model = gtk.TreeStore(str) top = model.append(None) model.set(top, 0, "Filesystem operations") top = model.append(None) model.set(top, 0, _("Localization")) iter = model.append(top) model.set(iter, 0, "Language: <b>%s</b>" % self.locale) iter = model.append(top) model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc) iter = model.append(top) model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc) top = model.append(None) model.set(top, 0, _("User settings")) username = self.wTree.get_widget("entry_username").get_text() realname = self.wTree.get_widget("entry_your_name").get_text() iter = model.append(top) model.set(iter, 0, "Real name: <b>%s</b>" % realname) iter = model.append(top) model.set(iter, 0, "Username: <b>%s</b>" % username) top = model.append(None) model.set(top, 0, _("System settings")) iter = model.append(top) model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text()) install_grub = self.wTree.get_widget("checkbutton_grub").get_active() grub_box = self.wTree.get_widget("combobox_grub") grub_active = grub_box.get_active() grub_model = grub_box.get_model() iter = model.append(top) if(install_grub): model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0]))) else: model.set(iter, 0, _("Do not install bootloader")) disks = self.wTree.get_widget("treeview_disks").get_model() for item in disks: if(item[2]): # format it iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1])))) if(item[3] is not None and item[3] is not ""): # mount point iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3])))) self.wTree.get_widget("treeview_overview").set_model(model)
|
model.set(iter, 0, "Username: <b>%s</b>" % username)
|
model.set(iter, 0, _("Username: ") + "<b>%s</b>" % username)
|
def show_overview(self): ''' build the summary page ''' model = gtk.TreeStore(str) top = model.append(None) model.set(top, 0, "Filesystem operations") top = model.append(None) model.set(top, 0, _("Localization")) iter = model.append(top) model.set(iter, 0, "Language: <b>%s</b>" % self.locale) iter = model.append(top) model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc) iter = model.append(top) model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc) top = model.append(None) model.set(top, 0, _("User settings")) username = self.wTree.get_widget("entry_username").get_text() realname = self.wTree.get_widget("entry_your_name").get_text() iter = model.append(top) model.set(iter, 0, "Real name: <b>%s</b>" % realname) iter = model.append(top) model.set(iter, 0, "Username: <b>%s</b>" % username) top = model.append(None) model.set(top, 0, _("System settings")) iter = model.append(top) model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text()) install_grub = self.wTree.get_widget("checkbutton_grub").get_active() grub_box = self.wTree.get_widget("combobox_grub") grub_active = grub_box.get_active() grub_model = grub_box.get_model() iter = model.append(top) if(install_grub): model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0]))) else: model.set(iter, 0, _("Do not install bootloader")) disks = self.wTree.get_widget("treeview_disks").get_model() for item in disks: if(item[2]): # format it iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1])))) if(item[3] is not None and item[3] is not ""): # mount point iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3])))) self.wTree.get_widget("treeview_overview").set_model(model)
|
model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text())
|
model.set(iter, 0, _("Hostname: ") + "<b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text())
|
def show_overview(self): ''' build the summary page ''' model = gtk.TreeStore(str) top = model.append(None) model.set(top, 0, "Filesystem operations") top = model.append(None) model.set(top, 0, _("Localization")) iter = model.append(top) model.set(iter, 0, "Language: <b>%s</b>" % self.locale) iter = model.append(top) model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc) iter = model.append(top) model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc) top = model.append(None) model.set(top, 0, _("User settings")) username = self.wTree.get_widget("entry_username").get_text() realname = self.wTree.get_widget("entry_your_name").get_text() iter = model.append(top) model.set(iter, 0, "Real name: <b>%s</b>" % realname) iter = model.append(top) model.set(iter, 0, "Username: <b>%s</b>" % username) top = model.append(None) model.set(top, 0, _("System settings")) iter = model.append(top) model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text()) install_grub = self.wTree.get_widget("checkbutton_grub").get_active() grub_box = self.wTree.get_widget("combobox_grub") grub_active = grub_box.get_active() grub_model = grub_box.get_model() iter = model.append(top) if(install_grub): model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0]))) else: model.set(iter, 0, _("Do not install bootloader")) disks = self.wTree.get_widget("treeview_disks").get_model() for item in disks: if(item[2]): # format it iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1])))) if(item[3] is not None and item[3] is not ""): # mount point iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3])))) self.wTree.get_widget("treeview_overview").set_model(model)
|
model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0])))
|
model.set(iter, 0, _("Install bootloader in %s") % ("<b>%s</b>" % grub_model[grub_active][0]))
|
def show_overview(self): ''' build the summary page ''' model = gtk.TreeStore(str) top = model.append(None) model.set(top, 0, "Filesystem operations") top = model.append(None) model.set(top, 0, _("Localization")) iter = model.append(top) model.set(iter, 0, "Language: <b>%s</b>" % self.locale) iter = model.append(top) model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc) iter = model.append(top) model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc) top = model.append(None) model.set(top, 0, _("User settings")) username = self.wTree.get_widget("entry_username").get_text() realname = self.wTree.get_widget("entry_your_name").get_text() iter = model.append(top) model.set(iter, 0, "Real name: <b>%s</b>" % realname) iter = model.append(top) model.set(iter, 0, "Username: <b>%s</b>" % username) top = model.append(None) model.set(top, 0, _("System settings")) iter = model.append(top) model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text()) install_grub = self.wTree.get_widget("checkbutton_grub").get_active() grub_box = self.wTree.get_widget("combobox_grub") grub_active = grub_box.get_active() grub_model = grub_box.get_model() iter = model.append(top) if(install_grub): model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0]))) else: model.set(iter, 0, _("Do not install bootloader")) disks = self.wTree.get_widget("treeview_disks").get_model() for item in disks: if(item[2]): # format it iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1])))) if(item[3] is not None and item[3] is not ""): # mount point iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3])))) self.wTree.get_widget("treeview_overview").set_model(model)
|
model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1]))))
|
model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s") % (item[0], item[4], item[1])))
|
def show_overview(self): ''' build the summary page ''' model = gtk.TreeStore(str) top = model.append(None) model.set(top, 0, "Filesystem operations") top = model.append(None) model.set(top, 0, _("Localization")) iter = model.append(top) model.set(iter, 0, "Language: <b>%s</b>" % self.locale) iter = model.append(top) model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc) iter = model.append(top) model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc) top = model.append(None) model.set(top, 0, _("User settings")) username = self.wTree.get_widget("entry_username").get_text() realname = self.wTree.get_widget("entry_your_name").get_text() iter = model.append(top) model.set(iter, 0, "Real name: <b>%s</b>" % realname) iter = model.append(top) model.set(iter, 0, "Username: <b>%s</b>" % username) top = model.append(None) model.set(top, 0, _("System settings")) iter = model.append(top) model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text()) install_grub = self.wTree.get_widget("checkbutton_grub").get_active() grub_box = self.wTree.get_widget("combobox_grub") grub_active = grub_box.get_active() grub_model = grub_box.get_model() iter = model.append(top) if(install_grub): model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0]))) else: model.set(iter, 0, _("Do not install bootloader")) disks = self.wTree.get_widget("treeview_disks").get_model() for item in disks: if(item[2]): # format it iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1])))) if(item[3] is not None and item[3] is not ""): # mount point iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3])))) self.wTree.get_widget("treeview_overview").set_model(model)
|
model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3]))))
|
model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s") % (item[0], item[3])))
|
def show_overview(self): ''' build the summary page ''' model = gtk.TreeStore(str) top = model.append(None) model.set(top, 0, "Filesystem operations") top = model.append(None) model.set(top, 0, _("Localization")) iter = model.append(top) model.set(iter, 0, "Language: <b>%s</b>" % self.locale) iter = model.append(top) model.set(iter, 0, "Keyboard layout: <b>%s</b>" % self.keyboard_layout_desc) iter = model.append(top) model.set(iter, 0, "Keyboard model: <b>%s</b>" % self.keyboard_model_desc) top = model.append(None) model.set(top, 0, _("User settings")) username = self.wTree.get_widget("entry_username").get_text() realname = self.wTree.get_widget("entry_your_name").get_text() iter = model.append(top) model.set(iter, 0, "Real name: <b>%s</b>" % realname) iter = model.append(top) model.set(iter, 0, "Username: <b>%s</b>" % username) top = model.append(None) model.set(top, 0, _("System settings")) iter = model.append(top) model.set(iter, 0, "Hostname: <b>%s</b>" % self.wTree.get_widget("entry_hostname").get_text()) install_grub = self.wTree.get_widget("checkbutton_grub").get_active() grub_box = self.wTree.get_widget("combobox_grub") grub_active = grub_box.get_active() grub_model = grub_box.get_model() iter = model.append(top) if(install_grub): model.set(iter, 0, _("Install bootloader to %s" % ("<b>%s</b>" % grub_model[grub_active][0]))) else: model.set(iter, 0, _("Do not install bootloader")) disks = self.wTree.get_widget("treeview_disks").get_model() for item in disks: if(item[2]): # format it iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Format %s (%s) as %s" % (item[0], item[4], item[1])))) if(item[3] is not None and item[3] is not ""): # mount point iter = model.append(top) model.set(iter, 0, "<b>%s</b>" % (_("Mount %s as %s" % (item[0], item[3])))) self.wTree.get_widget("treeview_overview").set_model(model)
|
self.window.show()
|
def __init__(self, fullscreen=False): self.resource_dir = '/usr/share/live-installer/' #self.glade = 'interface.glade' self.glade = os.path.join(self.resource_dir, 'interface.glade') self.wTree = gtk.glade.XML(self.glade, 'main_window')
|
|
color = style.bg[gtk.STATE_NORMAL] color2 = style.fg[gtk.STATE_NORMAL]
|
def __init__(self, fullscreen=False): self.resource_dir = '/usr/share/live-installer/' #self.glade = 'interface.glade' self.glade = os.path.join(self.resource_dir, 'interface.glade') self.wTree = gtk.glade.XML(self.glade, 'main_window')
|
|
self.wTree.get_widget("eventbox1").modify_bg(gtk.STATE_NORMAL, color)
|
self.wTree.get_widget("eventbox1").realize() self.wTree.get_widget("eventbox1").modify_bg(gtk.STATE_NORMAL, style.bg[gtk.STATE_NORMAL]) self.wTree.get_widget("eventbox1").modify_bg(gtk.STATE_ACTIVE, style.bg[gtk.STATE_ACTIVE])
|
def __init__(self, fullscreen=False): self.resource_dir = '/usr/share/live-installer/' #self.glade = 'interface.glade' self.glade = os.path.join(self.resource_dir, 'interface.glade') self.wTree = gtk.glade.XML(self.glade, 'main_window')
|
self.wTree.get_widget("help_label").modify_fg(gtk.STATE_NORMAL, color2)
|
self.wTree.get_widget("help_label").realize() self.wTree.get_widget("help_label").modify_fg(gtk.STATE_NORMAL, style.fg[gtk.STATE_NORMAL])
|
def __init__(self, fullscreen=False): self.resource_dir = '/usr/share/live-installer/' #self.glade = 'interface.glade' self.glade = os.path.join(self.resource_dir, 'interface.glade') self.wTree = gtk.glade.XML(self.glade, 'main_window')
|
page.breadcrumb_label.modify_fg(gtk.STATE_NORMAL, color2)
|
page.breadcrumb_label.modify_fg(gtk.STATE_NORMAL, style.fg[gtk.STATE_NORMAL])
|
def __init__(self, fullscreen=False): self.resource_dir = '/usr/share/live-installer/' #self.glade = 'interface.glade' self.glade = os.path.join(self.resource_dir, 'interface.glade') self.wTree = gtk.glade.XML(self.glade, 'main_window')
|
self.window.show_all()
|
def __init__(self, fullscreen=False): self.resource_dir = '/usr/share/live-installer/' #self.glade = 'interface.glade' self.glade = os.path.join(self.resource_dir, 'interface.glade') self.wTree = gtk.glade.XML(self.glade, 'main_window')
|
|
if partition.getFlag(parted.PARTITION_SWAP):
|
if partition.type == parted.PARTITION_SWAP:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_RAID):
|
elif partition.type == parted.PARTITION_RAID:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_LVM):
|
elif partition.type == parted.PARTITION_LVM:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_HPSERVICE):
|
elif partition.type == parted.PARTITION_HPSERVICE:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_PALO):
|
elif partition.type == parted.PARTITION_PALO:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_PREP):
|
elif partition.type == parted.PARTITION_PREP:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_MSFT_RESERVED):
|
elif partition.type == parted.PARTITION_MSFT_RESERVED:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_EXTENDED):
|
elif partition.type == parted.PARTITION_EXTENDED:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_LOGICAL):
|
elif partition.type == parted.PARTITION_LOGICAL:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
elif partition.getFlag(parted.PARTITION_FREESPACE):
|
elif partition.type == parted.PARTITION_FREESPACE:
|
def __init__(self, partition): self.partition = partition self.size = partition.getSize() self.start = partition.geometry.start self.end = partition.geometry.end self.description = "" self.used_space = "" if partition.number != -1: self.name = partition.path if partition.fileSystem is None: # no filesystem, check flags if partition.getFlag(parted.PARTITION_SWAP): self.type = ("Linux swap") elif partition.getFlag(parted.PARTITION_RAID): self.type = ("RAID") elif partition.getFlag(parted.PARTITION_LVM): self.type = ("Linux LVM") elif partition.getFlag(parted.PARTITION_HPSERVICE): self.type = ("HP Service") elif partition.getFlag(parted.PARTITION_PALO): self.type = ("PALO") elif partition.getFlag(parted.PARTITION_PREP): self.type = ("PReP") elif partition.getFlag(parted.PARTITION_MSFT_RESERVED): self.type = ("MSFT Reserved") elif partition.getFlag(parted.PARTITION_EXTENDED): self.type = ("Extended Partition") elif partition.getFlag(parted.PARTITION_LOGICAL): self.type = ("Logical Partition") elif partition.getFlag(parted.PARTITION_FREESPACE): self.type = ("Free Space") else: self.type =("Unknown") else: self.type = partition.fileSystem.type else: self.type = "" self.name = _("unallocated")
|
root_device = item item.format = True if(item.format != ""):
|
root_device = item if(item.format is not None and item.format != ""):
|
def install(self): ''' Install this baby to disk ''' # mount the media location. print " --> Installation started" try: if(not os.path.exists("/target")): os.mkdir("/target") if(not os.path.exists("/source")): os.mkdir("/source") # find the squashfs.. root = self.media root_type = self.media_type if(not os.path.exists(root)): print "Base filesystem does not exist! Critical error (exiting)." sys.exit(1) # change to report root_device = None # format partitions as appropriate for item in self.fstab.get_entries(): if(item.mountpoint == "/"): root_device = item item.format = True if(item.format != ""): # well now, we gets to nuke stuff. # report it. should grab the total count of filesystems to be formatted .. self.update_progress(total=4, current=1, pulse=True, message=_("Formatting %s as %s..." % (item.device, item.format))) self.format_device(item.device, item.format) item.filesystem = item.format # mount filesystem print " --> Mounting partitions" self.update_progress(total=4, current=2, message=_("Mounting %s on %s") % (root, "/source/")) print " ------ Mounting %s on %s" % (root, "/source/") self.do_mount(root, "/source/", root_type, options="loop") self.update_progress(total=4, current=3, message=_("Mounting %s on %s") % (root_device.device, "/target/")) print " ------ Mounting %s on %s" % (root_device.device, "/target/") self.do_mount(root_device.device, "/target", root_device.filesystem, None) for item in self.fstab.get_entries(): if(item.mountpoint != "/" and item.mountpoint != "swap"): print " ------ Mounting %s on %s" % (item.device, "/target" + item.mountpoint) os.system("mkdir -p /target" + item.mountpoint) self.do_mount(item.device, "/target" + item.mountpoint, item.filesystem, None) # walk root filesystem. 
we're too lazy though :P SOURCE = "/source/" DEST = "/target/" directory_times = [] our_total = 0 our_current = -1 os.chdir(SOURCE) # index the files print " --> Indexing files" for top,dirs,files in os.walk(SOURCE, topdown=False): our_total += len(dirs) + len(files) self.update_progress(pulse=True, message=_("Indexing files to be copied..")) our_total += 1 # safenessness print " --> Copying files" for top,dirs,files in os.walk(SOURCE): # Sanity check. Python is a bit schitzo dirpath = top if(dirpath.startswith(SOURCE)): dirpath = dirpath[len(SOURCE):] for name in dirs + files: # following is hacked/copied from Ubiquity rpath = os.path.join(dirpath, name) sourcepath = os.path.join(SOURCE, rpath) targetpath = os.path.join(DEST, rpath) st = os.lstat(sourcepath) mode = stat.S_IMODE(st.st_mode)
|
def add_mount(self, device=None, mountpoint=None, filesystem=None, options=None,format=False):
|
def add_mount(self, device=None, mountpoint=None, filesystem=None, options=None,format=""):
|
def add_mount(self, device=None, mountpoint=None, filesystem=None, options=None,format=False): if(not self.mapping.has_key(device)): self.mapping[device] = fstab_entry(device, mountpoint, filesystem, options) self.mapping[device].format = format
|
gtk.gdk.threads_leave()
|
def build_disks(self): import subprocess self.disks = {} inxi = subprocess.Popen("inxi -c0 -D", shell=True, stdout=subprocess.PIPE) parent = None for line in inxi.stdout: line = line.rstrip("\r\n") if(line.startswith("Disks:")): line = line.replace("Disks:", "") device = None sections = line.split(":") for section in sections: section = section.strip() if("/dev/" in section): device = None elements = section.split() for element in elements: if "/dev/" in element: device = element if elements[len(elements) -1].endswith("GB") or elements[elements[len(elements) -1]].endswith("GB"): size = elements[len(elements) -1] section = section.replace(size, "(%s)" % size) if device is not None: description = section.replace(device, "").strip() description = description.replace(" ", " ") self.disks[device] = description if(parent is None): self.device_node = device radio = gtk.RadioButton(None) radio.connect("clicked", self.select_disk_cb, device) radio.set_label(description) self.wTree.get_widget("vbox_disks").pack_start(radio, expand=False, fill=False) parent = radio else: radio = gtk.RadioButton(parent) radio.connect("clicked", self.select_disk_cb, device) radio.set_label(description) self.wTree.get_widget("vbox_disks").pack_start(radio, expand=False, fill=False) self.wTree.get_widget("vbox_disks").show_all()
|
|
gtk.gdk.threads_leave()
|
def build_partitions(self): self.window.set_sensitive(False) # "busy" cursor. cursor = gtk.gdk.Cursor(gtk.gdk.WATCH) self.window.window.set_cursor(cursor) from progress import ProgressDialog dialog = ProgressDialog() dialog.show(title=_("Installer"), label=_("Scanning disk %s for partitions") % self.device_node) import parted, commands from screen import Partition os.popen('mkdir -p /tmp/live-installer/tmpmount') # disks that you can install grub to grub_model = gtk.ListStore(str) hdd_descriptions = [] inxi = commands.getoutput("inxi -D -c 0") parts = inxi.split(":") partitions = [] path = self.device_node # i.e. /dev/sda grub_model.append([path]) device = parted.getDevice(path) disk = parted.Disk(device) partition = disk.getFirstPartition() last_added_partition = Partition(partition) partitions.append(last_added_partition) partition = partition.nextPartition() while (partition is not None): if last_added_partition.partition.number == -1 and partition.number == -1: last_added_partition.add_partition(partition) else: last_added_partition = Partition(partition) partitions.append(last_added_partition) if partition.number != -1 and "swap" not in last_added_partition.type: #Umount temp folder if ('/tmp/live-installer/tmpmount' in commands.getoutput('mount')): os.popen('umount /tmp/live-installer/tmpmount') #Mount partition if not mounted if (partition.path not in commands.getoutput('mount')): os.system("mount %s /tmp/live-installer/tmpmount" % partition.path) #Identify partition's description and used space if (partition.path in commands.getoutput('mount')): last_added_partition.used_space = commands.getoutput("df | grep %s | awk {'print $5'}" % partition.path) mount_point = commands.getoutput("df | grep %s | awk {'print $6'}" % partition.path) if os.path.exists(os.path.join(mount_point, 'etc/lsb-release')): last_added_partition.description = commands.getoutput("cat " + os.path.join(mount_point, 'etc/lsb-release') + " | grep 
DISTRIB_DESCRIPTION").replace('DISTRIB_DESCRIPTION', '').replace('=', '').replace('"', '').strip() elif os.path.exists(os.path.join(mount_point, 'etc/issue')): last_added_partition.description = commands.getoutput("cat " + os.path.join(mount_point, 'etc/issue')).replace('\\n', '').replace('\l', '').strip() elif os.path.exists(os.path.join(mount_point, 'Windows/servicing/Version')): version = commands.getoutput("ls %s" % os.path.join(mount_point, 'Windows/servicing/Version')) if version.startswith("6.1"): last_added_partition.description = "Windows 7" elif version.startswith("6.0"): last_added_partition.description = "Windows Vista" elif version.startswith("5.1") or version.startswith("5.2"): last_added_partition.description = "Windows XP" elif version.startswith("5.0"): last_added_partition.description = "Windows 2000" elif version.startswith("4.90"): last_added_partition.description = "Windows Me" elif version.startswith("4.1"): last_added_partition.description = "Windows 98" elif version.startswith("4.0.1381"): last_added_partition.description = "Windows NT" elif version.startswith("4.0.950"): last_added_partition.description = "Windows 95" elif os.path.exists(os.path.join(mount_point, 'Boot/BCD')): if os.system("grep -qs \"V.i.s.t.a\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows Vista bootloader" elif os.system("grep -qs \"W.i.n.d.o.w.s. .7\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows 7 bootloader" elif os.system("grep -qs \"W.i.n.d.o.w.s. .R.e.c.o.v.e.r.y. .E.n.v.i.r.o.n.m.e.n.t\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows recovery" elif os.system("grep -qs \"W.i.n.d.o.w.s. .S.e.r.v.e.r. 
.2.0.0.8\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows Server 2008 bootloader" else: last_added_partition.description = "Windows bootloader" elif os.path.exists(os.path.join(mount_point, 'Windows/System32')): last_added_partition.description = "Windows" #Umount temp folder if ('/tmp/live-installer/tmpmount' in commands.getoutput('mount')): os.popen('umount /tmp/live-installer/tmpmount') partition = partition.nextPartition() from screen import Screen myScreen = Screen(partitions) self.part_screen = myScreen kids = self.wTree.get_widget("vbox_cairo").get_children() if(kids is not None): for sprog in kids: self.wTree.get_widget("vbox_cairo").remove(sprog) self.wTree.get_widget("vbox_cairo").add(myScreen) self.wTree.get_widget("vbox_cairo").show_all() color = self.wTree.get_widget("notebook1").style.bg[gtk.STATE_ACTIVE] self.part_screen.modify_bg(gtk.STATE_NORMAL, color) model = gtk.ListStore(str,str,bool,str,str,bool, str, str, str) model2 = gtk.ListStore(str) for partition in partitions: if partition.size > 0.5: if partition.partition.number == -1: model.append(["<small><span foreground='#555555'>" + partition.name + "</span></small>", partition.type, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) elif partition.real_type == parted.PARTITION_EXTENDED: print "Extended partition" model.append(["<small><span foreground='#555555'>extended partition</span></small>", None, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) else: if partition.description != "": model.append([partition.name, "%s (%s)" % (partition.description, partition.type), False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) else: model.append([partition.name, partition.type, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, 
partition.used_space]) self.wTree.get_widget("treeview_disks").set_model(model) self.wTree.get_widget("combobox_grub").set_model(grub_model) self.wTree.get_widget("combobox_grub").set_active(0) dialog.hide() self.window.set_sensitive(True) self.window.window.set_cursor(None)
|
|
gtk.gdk.threads_enter()
|
def build_partitions(self): self.window.set_sensitive(False) # "busy" cursor. cursor = gtk.gdk.Cursor(gtk.gdk.WATCH) self.window.window.set_cursor(cursor) from progress import ProgressDialog dialog = ProgressDialog() dialog.show(title=_("Installer"), label=_("Scanning disk %s for partitions") % self.device_node) import parted, commands from screen import Partition os.popen('mkdir -p /tmp/live-installer/tmpmount') # disks that you can install grub to grub_model = gtk.ListStore(str) hdd_descriptions = [] inxi = commands.getoutput("inxi -D -c 0") parts = inxi.split(":") partitions = [] path = self.device_node # i.e. /dev/sda grub_model.append([path]) device = parted.getDevice(path) disk = parted.Disk(device) partition = disk.getFirstPartition() last_added_partition = Partition(partition) partitions.append(last_added_partition) partition = partition.nextPartition() while (partition is not None): if last_added_partition.partition.number == -1 and partition.number == -1: last_added_partition.add_partition(partition) else: last_added_partition = Partition(partition) partitions.append(last_added_partition) if partition.number != -1 and "swap" not in last_added_partition.type: #Umount temp folder if ('/tmp/live-installer/tmpmount' in commands.getoutput('mount')): os.popen('umount /tmp/live-installer/tmpmount') #Mount partition if not mounted if (partition.path not in commands.getoutput('mount')): os.system("mount %s /tmp/live-installer/tmpmount" % partition.path) #Identify partition's description and used space if (partition.path in commands.getoutput('mount')): last_added_partition.used_space = commands.getoutput("df | grep %s | awk {'print $5'}" % partition.path) mount_point = commands.getoutput("df | grep %s | awk {'print $6'}" % partition.path) if os.path.exists(os.path.join(mount_point, 'etc/lsb-release')): last_added_partition.description = commands.getoutput("cat " + os.path.join(mount_point, 'etc/lsb-release') + " | grep 
DISTRIB_DESCRIPTION").replace('DISTRIB_DESCRIPTION', '').replace('=', '').replace('"', '').strip() elif os.path.exists(os.path.join(mount_point, 'etc/issue')): last_added_partition.description = commands.getoutput("cat " + os.path.join(mount_point, 'etc/issue')).replace('\\n', '').replace('\l', '').strip() elif os.path.exists(os.path.join(mount_point, 'Windows/servicing/Version')): version = commands.getoutput("ls %s" % os.path.join(mount_point, 'Windows/servicing/Version')) if version.startswith("6.1"): last_added_partition.description = "Windows 7" elif version.startswith("6.0"): last_added_partition.description = "Windows Vista" elif version.startswith("5.1") or version.startswith("5.2"): last_added_partition.description = "Windows XP" elif version.startswith("5.0"): last_added_partition.description = "Windows 2000" elif version.startswith("4.90"): last_added_partition.description = "Windows Me" elif version.startswith("4.1"): last_added_partition.description = "Windows 98" elif version.startswith("4.0.1381"): last_added_partition.description = "Windows NT" elif version.startswith("4.0.950"): last_added_partition.description = "Windows 95" elif os.path.exists(os.path.join(mount_point, 'Boot/BCD')): if os.system("grep -qs \"V.i.s.t.a\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows Vista bootloader" elif os.system("grep -qs \"W.i.n.d.o.w.s. .7\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows 7 bootloader" elif os.system("grep -qs \"W.i.n.d.o.w.s. .R.e.c.o.v.e.r.y. .E.n.v.i.r.o.n.m.e.n.t\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows recovery" elif os.system("grep -qs \"W.i.n.d.o.w.s. .S.e.r.v.e.r. 
.2.0.0.8\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows Server 2008 bootloader" else: last_added_partition.description = "Windows bootloader" elif os.path.exists(os.path.join(mount_point, 'Windows/System32')): last_added_partition.description = "Windows" #Umount temp folder if ('/tmp/live-installer/tmpmount' in commands.getoutput('mount')): os.popen('umount /tmp/live-installer/tmpmount') partition = partition.nextPartition() from screen import Screen myScreen = Screen(partitions) self.part_screen = myScreen kids = self.wTree.get_widget("vbox_cairo").get_children() if(kids is not None): for sprog in kids: self.wTree.get_widget("vbox_cairo").remove(sprog) self.wTree.get_widget("vbox_cairo").add(myScreen) self.wTree.get_widget("vbox_cairo").show_all() color = self.wTree.get_widget("notebook1").style.bg[gtk.STATE_ACTIVE] self.part_screen.modify_bg(gtk.STATE_NORMAL, color) model = gtk.ListStore(str,str,bool,str,str,bool, str, str, str) model2 = gtk.ListStore(str) for partition in partitions: if partition.size > 0.5: if partition.partition.number == -1: model.append(["<small><span foreground='#555555'>" + partition.name + "</span></small>", partition.type, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) elif partition.real_type == parted.PARTITION_EXTENDED: print "Extended partition" model.append(["<small><span foreground='#555555'>extended partition</span></small>", None, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) else: if partition.description != "": model.append([partition.name, "%s (%s)" % (partition.description, partition.type), False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) else: model.append([partition.name, partition.type, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, 
partition.used_space]) self.wTree.get_widget("treeview_disks").set_model(model) self.wTree.get_widget("combobox_grub").set_model(grub_model) self.wTree.get_widget("combobox_grub").set_active(0) dialog.hide() self.window.set_sensitive(True) self.window.window.set_cursor(None)
|
|
self.wTree.get_widget("combobox_grub").set_active(0)
|
self.wTree.get_widget("combobox_grub").set_active(0) gtk.gdk.threads_leave()
|
def build_partitions(self): self.window.set_sensitive(False) # "busy" cursor. cursor = gtk.gdk.Cursor(gtk.gdk.WATCH) self.window.window.set_cursor(cursor) from progress import ProgressDialog dialog = ProgressDialog() dialog.show(title=_("Installer"), label=_("Scanning disk %s for partitions") % self.device_node) import parted, commands from screen import Partition os.popen('mkdir -p /tmp/live-installer/tmpmount') # disks that you can install grub to grub_model = gtk.ListStore(str) hdd_descriptions = [] inxi = commands.getoutput("inxi -D -c 0") parts = inxi.split(":") partitions = [] path = self.device_node # i.e. /dev/sda grub_model.append([path]) device = parted.getDevice(path) disk = parted.Disk(device) partition = disk.getFirstPartition() last_added_partition = Partition(partition) partitions.append(last_added_partition) partition = partition.nextPartition() while (partition is not None): if last_added_partition.partition.number == -1 and partition.number == -1: last_added_partition.add_partition(partition) else: last_added_partition = Partition(partition) partitions.append(last_added_partition) if partition.number != -1 and "swap" not in last_added_partition.type: #Umount temp folder if ('/tmp/live-installer/tmpmount' in commands.getoutput('mount')): os.popen('umount /tmp/live-installer/tmpmount') #Mount partition if not mounted if (partition.path not in commands.getoutput('mount')): os.system("mount %s /tmp/live-installer/tmpmount" % partition.path) #Identify partition's description and used space if (partition.path in commands.getoutput('mount')): last_added_partition.used_space = commands.getoutput("df | grep %s | awk {'print $5'}" % partition.path) mount_point = commands.getoutput("df | grep %s | awk {'print $6'}" % partition.path) if os.path.exists(os.path.join(mount_point, 'etc/lsb-release')): last_added_partition.description = commands.getoutput("cat " + os.path.join(mount_point, 'etc/lsb-release') + " | grep 
DISTRIB_DESCRIPTION").replace('DISTRIB_DESCRIPTION', '').replace('=', '').replace('"', '').strip() elif os.path.exists(os.path.join(mount_point, 'etc/issue')): last_added_partition.description = commands.getoutput("cat " + os.path.join(mount_point, 'etc/issue')).replace('\\n', '').replace('\l', '').strip() elif os.path.exists(os.path.join(mount_point, 'Windows/servicing/Version')): version = commands.getoutput("ls %s" % os.path.join(mount_point, 'Windows/servicing/Version')) if version.startswith("6.1"): last_added_partition.description = "Windows 7" elif version.startswith("6.0"): last_added_partition.description = "Windows Vista" elif version.startswith("5.1") or version.startswith("5.2"): last_added_partition.description = "Windows XP" elif version.startswith("5.0"): last_added_partition.description = "Windows 2000" elif version.startswith("4.90"): last_added_partition.description = "Windows Me" elif version.startswith("4.1"): last_added_partition.description = "Windows 98" elif version.startswith("4.0.1381"): last_added_partition.description = "Windows NT" elif version.startswith("4.0.950"): last_added_partition.description = "Windows 95" elif os.path.exists(os.path.join(mount_point, 'Boot/BCD')): if os.system("grep -qs \"V.i.s.t.a\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows Vista bootloader" elif os.system("grep -qs \"W.i.n.d.o.w.s. .7\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows 7 bootloader" elif os.system("grep -qs \"W.i.n.d.o.w.s. .R.e.c.o.v.e.r.y. .E.n.v.i.r.o.n.m.e.n.t\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows recovery" elif os.system("grep -qs \"W.i.n.d.o.w.s. .S.e.r.v.e.r. 
.2.0.0.8\" " + os.path.join(mount_point, 'Boot/BCD')) == 0: last_added_partition.description = "Windows Server 2008 bootloader" else: last_added_partition.description = "Windows bootloader" elif os.path.exists(os.path.join(mount_point, 'Windows/System32')): last_added_partition.description = "Windows" #Umount temp folder if ('/tmp/live-installer/tmpmount' in commands.getoutput('mount')): os.popen('umount /tmp/live-installer/tmpmount') partition = partition.nextPartition() from screen import Screen myScreen = Screen(partitions) self.part_screen = myScreen kids = self.wTree.get_widget("vbox_cairo").get_children() if(kids is not None): for sprog in kids: self.wTree.get_widget("vbox_cairo").remove(sprog) self.wTree.get_widget("vbox_cairo").add(myScreen) self.wTree.get_widget("vbox_cairo").show_all() color = self.wTree.get_widget("notebook1").style.bg[gtk.STATE_ACTIVE] self.part_screen.modify_bg(gtk.STATE_NORMAL, color) model = gtk.ListStore(str,str,bool,str,str,bool, str, str, str) model2 = gtk.ListStore(str) for partition in partitions: if partition.size > 0.5: if partition.partition.number == -1: model.append(["<small><span foreground='#555555'>" + partition.name + "</span></small>", partition.type, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) elif partition.real_type == parted.PARTITION_EXTENDED: print "Extended partition" model.append(["<small><span foreground='#555555'>extended partition</span></small>", None, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) else: if partition.description != "": model.append([partition.name, "%s (%s)" % (partition.description, partition.type), False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, partition.used_space]) else: model.append([partition.name, partition.type, False, None, '%.0f' % round(partition.size, 0), False, partition.start, partition.end, 
partition.used_space]) self.wTree.get_widget("treeview_disks").set_model(model) self.wTree.get_widget("combobox_grub").set_model(grub_model) self.wTree.get_widget("combobox_grub").set_active(0) dialog.hide() self.window.set_sensitive(True) self.window.window.set_cursor(None)
|
elif(self == self.PAGE_OVERVIEW):
|
elif(sel == self.PAGE_OVERVIEW):
|
def wizard_cb(self, widget, goback, data=None): ''' wizard buttons ''' sel = self.wTree.get_widget("notebook1").get_current_page() # check each page for errors if(not goback): if(sel == self.PAGE_LANGUAGE): self.activate_page(self.PAGE_KEYBOARD) elif(sel == self.PAGE_KEYBOARD): self.activate_page(self.PAGE_PARTITIONS) notebook = self.wTree.get_widget("notebook_disks") if len(self.disks) == 1: notebook.set_current_page(1) thr = threading.Thread(name="live-installer-disk-search", group=None, target=self.build_partitions, args=(), kwargs={}) thr.start() else: notebook.set_current_page(0) elif(sel == self.PAGE_PARTITIONS): notebook = self.wTree.get_widget("notebook_disks") if notebook.get_current_page() == 0: notebook.set_current_page(1) thr = threading.Thread(name="live-installer-disk-search", group=None, target=self.build_partitions, args=(), kwargs={}) thr.start() else: model = self.wTree.get_widget("treeview_disks").get_model() found_root = False for row in model: mountpoint = row[3] if(mountpoint == "/"): found_root = True if(not found_root): MessageDialog(_("Installation Tool"), _("Please select a root (/) partition before proceeding"), gtk.MESSAGE_ERROR).show() else: self.activate_page(self.PAGE_USER) elif(sel == self.PAGE_USER): username = self.wTree.get_widget("entry_username").get_text() if(username == ""): MessageDialog(_("Installation Tool"), _("Please provide a username"), gtk.MESSAGE_ERROR).show() else: # username valid? 
for char in username: if(char.isupper()): MessageDialog(_("Installation Tool"), _("Your username must be lower case"), gtk.MESSAGE_WARNING).show() elif(char.isspace()): MessageDialog(_("Installation Tool"), _("Your username may not contain whitespace"), gtk.MESSAGE_WARNING).show() else: password1 = self.wTree.get_widget("entry_userpass1").get_text() password2 = self.wTree.get_widget("entry_userpass2").get_text() if(password1 == ""): MessageDialog(_("Installation Tool"), _("Please provide a password for your user account"), gtk.MESSAGE_WARNING).show() elif(password1 != password2): MessageDialog(_("Installation Tool"), _("Your passwords do not match"), gtk.MESSAGE_ERROR).show() else: self.activate_page(self.PAGE_ADVANCED) elif(sel == self.PAGE_ADVANCED): self.activate_page(self.PAGE_OVERVIEW) self.show_overview() self.wTree.get_widget("treeview_overview").expand_all() elif(self == self.PAGE_OVERVIEW): self.activate_page(self.PAGE_INSTALL) # do install self.wTree.get_widget("button_next").hide() self.wTree.get_widget("button_back").hide() thr = threading.Thread(name="live-install", group=None, args=(), kwargs={}, target=self.do_install) thr.start() self.wTree.get_widget("button_back").set_sensitive(True) else: if(sel == self.PAGE_OVERVIEW): self.activate_page(self.PAGE_ADVANCED) if(sel == self.PAGE_ADVANCED): self.activate_page(self.PAGE_USER) if(sel == self.PAGE_USER): self.activate_page(self.PAGE_PARTITIONS) notebook = self.wTree.get_widget("notebook_disks") if len(self.disks) == 1: notebook.set_current_page(1) thr = threading.Thread(name="live-installer-disk-search", group=None, target=self.build_partitions, args=(), kwargs={}) thr.start() else: notebook.set_current_page(0) if(sel == self.PAGE_PARTITIONS): self.activate_page(self.PAGE_KEYBOARD) if(sel == self.PAGE_KEYBOARD): self.activate_page(self.PAGE_LANGUAGE) self.wTree.get_widget("button_back").set_sensitive(False)
|
gtk.gdk.threads_enter()
|
def pbar_pulse(): if(not self.should_pulse): return False self.wTree.get_widget("progressbar").pulse() return self.should_pulse
|
|
gtk.gdk.threads_leave()
|
def pbar_pulse(): if(not self.should_pulse): return False self.wTree.get_widget("progressbar").pulse() return self.should_pulse
|
|
self.configure_grub()
|
self.configure_grub(our_total, our_current)
|
def install(self): ''' Install this baby to disk ''' # mount the media location. print " --> Installation started" try: if(not os.path.exists("/target")): os.mkdir("/target") if(not os.path.exists("/source")): os.mkdir("/source") # find the squashfs.. root = self.media root_type = self.media_type if(not os.path.exists(root)): print "Base filesystem does not exist! Critical error (exiting)." sys.exit(1) # change to report root_device = None # format partitions as appropriate for item in self.fstab.get_entries(): if(item.mountpoint == "/"): root_device = item item.format = True if(item.format): # well now, we gets to nuke stuff. # report it. should grab the total count of filesystems to be formatted .. self.update_progress(total=4, current=1, pulse=True, message=_("Formatting %s as %s..." % (item.device, item.filesystem))) self.format_device(item.device, item.filesystem) # mount filesystem print " --> Mounting partitions" self.update_progress(total=4, current=2, message=_("Mounting %s on %s") % (root, "/source/")) self.do_mount(root, "/source/", root_type, options="loop") self.update_progress(total=4, current=3, message=_("Mounting %s on %s") % (root_device.device, "/target/")) self.do_mount(root_device.device, "/target", root_device.filesystem, None) # walk root filesystem. we're too lazy though :P SOURCE = "/source/" DEST = "/target/" directory_times = [] our_total = 0 our_current = -1 os.chdir(SOURCE) # index the files print " --> Indexing files" for top,dirs,files in os.walk(SOURCE, topdown=False): our_total += len(dirs) + len(files) self.update_progress(pulse=True, message=_("Indexing files to be copied..")) our_total += 1 # safenessness print " --> Copying files" for top,dirs,files in os.walk(SOURCE): # Sanity check. 
Python is a bit schitzo dirpath = top if(dirpath.startswith(SOURCE)): dirpath = dirpath[len(SOURCE):] for name in dirs + files: # following is hacked/copied from Ubiquity rpath = os.path.join(dirpath, name) sourcepath = os.path.join(SOURCE, rpath) targetpath = os.path.join(DEST, rpath) st = os.lstat(sourcepath) mode = stat.S_IMODE(st.st_mode)
|
while (not self.check_grub()): self.configure_grub()
|
while (not self.check_grub(our_total, our_current)): self.configure_grub(our_total, our_current)
|
def install(self): ''' Install this baby to disk ''' # mount the media location. print " --> Installation started" try: if(not os.path.exists("/target")): os.mkdir("/target") if(not os.path.exists("/source")): os.mkdir("/source") # find the squashfs.. root = self.media root_type = self.media_type if(not os.path.exists(root)): print "Base filesystem does not exist! Critical error (exiting)." sys.exit(1) # change to report root_device = None # format partitions as appropriate for item in self.fstab.get_entries(): if(item.mountpoint == "/"): root_device = item item.format = True if(item.format): # well now, we gets to nuke stuff. # report it. should grab the total count of filesystems to be formatted .. self.update_progress(total=4, current=1, pulse=True, message=_("Formatting %s as %s..." % (item.device, item.filesystem))) self.format_device(item.device, item.filesystem) # mount filesystem print " --> Mounting partitions" self.update_progress(total=4, current=2, message=_("Mounting %s on %s") % (root, "/source/")) self.do_mount(root, "/source/", root_type, options="loop") self.update_progress(total=4, current=3, message=_("Mounting %s on %s") % (root_device.device, "/target/")) self.do_mount(root_device.device, "/target", root_device.filesystem, None) # walk root filesystem. we're too lazy though :P SOURCE = "/source/" DEST = "/target/" directory_times = [] our_total = 0 our_current = -1 os.chdir(SOURCE) # index the files print " --> Indexing files" for top,dirs,files in os.walk(SOURCE, topdown=False): our_total += len(dirs) + len(files) self.update_progress(pulse=True, message=_("Indexing files to be copied..")) our_total += 1 # safenessness print " --> Copying files" for top,dirs,files in os.walk(SOURCE): # Sanity check. 
Python is a bit schitzo dirpath = top if(dirpath.startswith(SOURCE)): dirpath = dirpath[len(SOURCE):] for name in dirs + files: # following is hacked/copied from Ubiquity rpath = os.path.join(dirpath, name) sourcepath = os.path.join(SOURCE, rpath) targetpath = os.path.join(DEST, rpath) st = os.lstat(sourcepath) mode = stat.S_IMODE(st.st_mode)
|
def configure_grub(self):
|
def configure_grub(self, our_total, our_current):
|
def configure_grub(self): self.update_progress(pulse=True, total=our_total, current=our_current, message=_("Configuring bootloader")) print " --> Running grub-mkconfig" self.run_in_chroot("grub-mkconfig -o /boot/grub/grub.cfg")
|
def check_grub(self):
|
def check_grub(self, our_total, our_current):
|
def check_grub(self): self.update_progress(pulse=True, total=our_total, current=our_current, message=_("Checking bootloader")) print " --> Checking Grub configuration" time.sleep(2) found_theme = False found_entry = False grubfh = open("/boot/grub/grub.cfg", "r") for line in grubfh: line = line.rstrip("\r\n") if("/boot/grub/linuxmint.png" in line): found_theme = True if ("menuentry" in line and "Mint" in line): found_entry = True print " --> Found Grub entry: %s " % line grubfh.close() return (found_theme and found_entry)
|
PATH = "%s/share/" % PREFIX
|
PATH = "%s/share/doc/specto/" % PREFIX
|
def get_path(category=None): """ Return the correct path. """ if not os.path.exists('data') or not os.path.exists('spectlib'): if not category: PATH = "%s/share/specto/" % PREFIX elif category=="doc": PATH = "%s/share/" % PREFIX elif category=="src": PATH = os.path.dirname(os.path.abspath(__file__)) else: if not category: PATH =os.path.join(os.getcwd(), "data/") elif category=="doc": PATH = os.path.join(os.getcwd(), "") elif category=="src": PATH = os.path.dirname(os.path.abspath(__file__)) if category == "specto": try: PATH = os.path.join(os.environ['XDG_CONFIG_HOME'], "specto") except KeyError: PATH = os.path.join(os.environ['HOME'], ".config", "specto") if not os.path.exists(PATH): os.makedirs(PATH) os.chmod(PATH, 0700) # Meet XDG spec if category == "tmp": try: PATH = os.path.join(os.environ['XDG_CACHE_HOME'], "specto") except KeyError: PATH = os.path.join(os.environ['HOME'], ".cache", "specto") if not os.path.exists(PATH): os.makedirs(PATH) os.chmod(PATH, 0700) # Meet XDG spec return PATH
|
%s/share/specto/icons/hicolor''' % prefix
|
%s/share/icons/hicolor''' % prefix
|
def give_files(dir, *extension): files=[] all_files=os.listdir(dir) for file in all_files: ext=(os.path.splitext(file))[1] if ext in extension: files.append(dir + file) return files
|
('share/specto/glade', give_files('data/glade/', '.glade')),
|
('share/specto/uis', give_files('data/uis/', '.ui')),
|
def give_mo_tuples(langs): mo_tuple_list=[] for lang in langs.split(' '): mo_tuple_list.append((give_mo_path(lang), [give_mo_file(lang)])) return mo_tuple_list
|
SID = greader.login() feed_db = greader.get_unread_items(SID)
|
auth = greader.login() feed_db = greader.get_unread_items(auth)
|
def check(self): """ Check for new news on your greader account. """ try: self.newMsg = 0 self.unreadMsg = 0 greader = Greader(self.username, self.password, "specto") SID = greader.login() feed_db = greader.get_unread_items(SID) for feed in feed_db: self.unreadMsg += feed.messages if feed.messages > 0 and self.news_info.add(feed): self.actually_changed = True self.newMsg += feed.messages if self.unreadMsg == 0:#no unread items, we need to clear the watch self.mark_as_read() self.news_info = Feed_collection() else: if self.unreadMsg == 1000: self.or_more = _(" or more") self.write_cache_file()
|
return re.search('SID=(\S*)', result).group(1)
|
return re.search('Auth=(\S*)', result).group(1)
|
def login(self): #login / get SED header = {'User-agent' : self.source} post_data = urllib.urlencode({ 'Email': self.user, 'Passwd': self.password, 'service': 'reader', 'source': self.source, 'continue': self.google_url, }) request = urllib2.Request(self.login_url, post_data, header) try : f = urllib2.urlopen( request ) result = f.read() except: raise Exception('Error logging in') return re.search('SID=(\S*)', result).group(1)
|
def get_results(self, SID, url):
|
def get_results(self, auth, url):
|
def get_results(self, SID, url): #get results from url header = {'User-agent' : self.source} header['Cookie']='Name=SID;SID=%s;Domain=.google.com;Path=/;Expires=160000000000' % SID request = urllib2.Request(url, None, header) try : f = urllib2.urlopen( request ) result = f.read() except: raise Exception('Error getting data from %s' % url) return result
|
header['Cookie']='Name=SID;SID=%s;Domain=.google.com;Path=/;Expires=160000000000' % SID
|
header['Authorization']='GoogleLogin auth=%s' % auth
|
def get_results(self, SID, url): #get results from url header = {'User-agent' : self.source} header['Cookie']='Name=SID;SID=%s;Domain=.google.com;Path=/;Expires=160000000000' % SID request = urllib2.Request(url, None, header) try : f = urllib2.urlopen( request ) result = f.read() except: raise Exception('Error getting data from %s' % url) return result
|
def get_unread_items(self, SID):
|
def get_unread_items(self, auth):
|
def get_unread_items(self, SID): feed_db = [] data = self.get_results(SID, self.read_items_url) feed_data = self.list_feeds(SID) node = ET.XML(data) feed_node = ET.XML(feed_data) total_unread = 0 node = node.find("list") feed_node = feed_node.find("list") for o in node.findall("object"): feed = "" total_unread = 0 feed_title = "" for n in o.findall("string"): if (n.attrib["name"] == "id"): feed = n.text for n in o.findall("number"): if (n.attrib["name"] == "count"): total_unread = int(n.text) if feed[0:5] != "user/": for x in feed_node.findall("object"): found = False for y in x.findall("string"): if(y.attrib["name"] == "id" and y.text == feed): found = True if(y.attrib["name"] == "title" and found == True): feed_title = y.text if feed_title != "" and total_unread > 0: f = Feed(feed_title, total_unread) feed_db.append(f) return feed_db
|
data = self.get_results(SID, self.read_items_url) feed_data = self.list_feeds(SID)
|
data = self.get_results(auth, self.read_items_url) feed_data = self.list_feeds(auth)
|
def get_unread_items(self, SID): feed_db = [] data = self.get_results(SID, self.read_items_url) feed_data = self.list_feeds(SID) node = ET.XML(data) feed_node = ET.XML(feed_data) total_unread = 0 node = node.find("list") feed_node = feed_node.find("list") for o in node.findall("object"): feed = "" total_unread = 0 feed_title = "" for n in o.findall("string"): if (n.attrib["name"] == "id"): feed = n.text for n in o.findall("number"): if (n.attrib["name"] == "count"): total_unread = int(n.text) if feed[0:5] != "user/": for x in feed_node.findall("object"): found = False for y in x.findall("string"): if(y.attrib["name"] == "id" and y.text == feed): found = True if(y.attrib["name"] == "title" and found == True): feed_title = y.text if feed_title != "" and total_unread > 0: f = Feed(feed_title, total_unread) feed_db.append(f) return feed_db
|
return self.notifier.flags() & gtk.VISIBLE
|
return bool(self.notifier.flags() & gtk.VISIBLE)
|
def get_state(self): """ Return True if the notifier window is visible. """ return self.notifier.flags() & gtk.VISIBLE
|
self.playing = False
|
def __init__(self): self.player = None
|
|
open = True
|
use_standard_command = True
|
def add_clicked(self, widget): """ Add the watch to the watches repository. """ values = {} #get the standard options from a watch values['name'] = self.name.get_text()
|
if open == True:
|
if use_standard_command:
|
def add_clicked(self, widget): """ Add the watch to the watches repository. """ values = {} #get the standard options from a watch values['name'] = self.name.get_text()
|
Watch class that will check if you recevied a new mail on your pop3 account.
|
Watch class that will check if you recevied a new mail on your IMAP account.
|
def get_add_gui_info(): return [("username", spectlib.gtkconfig.Entry(_("Username"))), ("password", spectlib.gtkconfig.PasswordEntry(_("Password"))), ("host", spectlib.gtkconfig.Entry(_("Host"))), ("ssl", spectlib.gtkconfig.CheckButton(_("Use SSL"))), ("folder", spectlib.gtkconfig.Entry(_("Folder (optional)")))]
|
PATH = "%s/share/specto/" % sys.prefix
|
PATH = "%s/share/specto/" % PREFIX
|
def get_path(category=None): """ Return the correct path. """ if not os.path.exists('data') or not os.path.exists('spectlib'): if not category: PATH = "%s/share/specto/" % sys.prefix elif category=="doc": PATH = "%s/share/doc/specto/" % sys.prefix elif category=="src": PATH = os.path.dirname(os.path.abspath(__file__)) else: if not category: PATH =os.path.join(os.getcwd(), "data/") elif category=="doc": PATH = os.path.join(os.getcwd(), "data/doc/") elif category=="src": PATH = os.path.dirname(os.path.abspath(__file__)) if category == "specto": try: PATH = os.path.join(os.environ['XDG_CONFIG_HOME'], "specto") except KeyError: PATH = os.path.join(os.environ['HOME'], ".config", "specto") if not os.path.exists(PATH): os.makedirs(PATH) os.chmod(PATH, 0700) # Meet XDG spec if category == "tmp": try: PATH = os.path.join(os.environ['XDG_CACHE_HOME'], "specto") except KeyError: PATH = os.path.join(os.environ['HOME'], ".cache", "specto") if not os.path.exists(PATH): os.makedirs(PATH) os.chmod(PATH, 0700) # Meet XDG spec return PATH
|
PATH = "%s/share/doc/specto/" % sys.prefix
|
PATH = "%s/share/" % PREFIX
|
def get_path(category=None): """ Return the correct path. """ if not os.path.exists('data') or not os.path.exists('spectlib'): if not category: PATH = "%s/share/specto/" % sys.prefix elif category=="doc": PATH = "%s/share/doc/specto/" % sys.prefix elif category=="src": PATH = os.path.dirname(os.path.abspath(__file__)) else: if not category: PATH =os.path.join(os.getcwd(), "data/") elif category=="doc": PATH = os.path.join(os.getcwd(), "data/doc/") elif category=="src": PATH = os.path.dirname(os.path.abspath(__file__)) if category == "specto": try: PATH = os.path.join(os.environ['XDG_CONFIG_HOME'], "specto") except KeyError: PATH = os.path.join(os.environ['HOME'], ".config", "specto") if not os.path.exists(PATH): os.makedirs(PATH) os.chmod(PATH, 0700) # Meet XDG spec if category == "tmp": try: PATH = os.path.join(os.environ['XDG_CACHE_HOME'], "specto") except KeyError: PATH = os.path.join(os.environ['HOME'], ".cache", "specto") if not os.path.exists(PATH): os.makedirs(PATH) os.chmod(PATH, 0700) # Meet XDG spec return PATH
|
PATH = os.path.join(os.getcwd(), "data/doc/")
|
PATH = os.path.join(os.getcwd(), "")
|
def get_path(category=None): """ Return the correct path. """ if not os.path.exists('data') or not os.path.exists('spectlib'): if not category: PATH = "%s/share/specto/" % sys.prefix elif category=="doc": PATH = "%s/share/doc/specto/" % sys.prefix elif category=="src": PATH = os.path.dirname(os.path.abspath(__file__)) else: if not category: PATH =os.path.join(os.getcwd(), "data/") elif category=="doc": PATH = os.path.join(os.getcwd(), "data/doc/") elif category=="src": PATH = os.path.dirname(os.path.abspath(__file__)) if category == "specto": try: PATH = os.path.join(os.environ['XDG_CONFIG_HOME'], "specto") except KeyError: PATH = os.path.join(os.environ['HOME'], ".config", "specto") if not os.path.exists(PATH): os.makedirs(PATH) os.chmod(PATH, 0700) # Meet XDG spec if category == "tmp": try: PATH = os.path.join(os.environ['XDG_CACHE_HOME'], "specto") except KeyError: PATH = os.path.join(os.environ['HOME'], ".cache", "specto") if not os.path.exists(PATH): os.makedirs(PATH) os.chmod(PATH, 0700) # Meet XDG spec return PATH
|
return '%s/%s' % (THEME_URL, self.directory)
|
return '%s/%s' % (theme_url, self.directory)
|
def base_url(self): return '%s/%s' % (THEME_URL, self.directory)
|
map(lambda (k,v): k + "," + v), hints)
|
map(lambda (k,v): k + "," + v, hintCopy.items()))
|
def __init__(self, query, hints, pmap, reportError, resultName=""): self.queryStr = query.strip()# Pull trailing whitespace # Force semicolon to facilitate worker-side splitting if self.queryStr[-1] != ";": # Add terminal semicolon self.queryStr += ";" self.queryHash = hashlib.md5(self.queryStr).hexdigest()[:18] self._dbContext = "LSST" # Later adjusted by hints.
|
qConfig["table.defaultDb"] = self._dbContext
|
qConfig["table.defaultdb"] = self._dbContext
|
def __init__(self, query, hints, pmap, reportError, resultName=""): self.queryStr = query.strip()# Pull trailing whitespace # Force semicolon to facilitate worker-side splitting if self.queryStr[-1] != ";": # Add terminal semicolon self.queryStr += ";" self.queryHash = hashlib.md5(self.queryStr).hexdigest()[:18] self._dbContext = "LSST" # Later adjusted by hints.
|
print "Chunk %d to %s (%s)" % (chunk, saveName, table)
|
def submit(self, chunk, table, q): saveName = self._saveName(chunk) handle = submitQuery(self._sessionId, chunk, q, saveName, table) self._inFlight[chunk] = (handle, table) print "Chunk %d to %s (%s)" % (chunk, saveName, table)
|
|
print "Affected chunks: ", [x[0] for x in self._intersectIter]
|
def _evaluateHints(self, hints, pmap): """Modify self.fullSky and self.partitionCoverage according to spatial hints""" self._isFullSky = True self._intersectIter = pmap self._dbContext = "LSST" ## FIXME. should be configurable if hints: regions = self._parseRegions(hints) self._dbContext = hints.get("db", "") ids = hints.get("objectId", "") if regions != []: self._intersectIter = pmap.intersect(regions) self._isFullSky = False if ids: chunkIds = self._getChunkIdsFromObjs(ids) if regions != []: self._intersectIter = chain(self._intersectIter, chunkIds) else: self._intersectIter = map(lambda i: (i,[]), chunkIds) self._isFullSky = False print "Affected chunks: ", [x[0] for x in self._intersectIter] pass
|
|
map(lambda (k,v): k + "," + v), hints))
|
map(lambda (k,v): k + "," + v), hints)
|
def __init__(self, query, hints, pmap, reportError, resultName=""): self.queryStr = query.strip()# Pull trailing whitespace # Force semicolon to facilitate worker-side splitting if self.queryStr[-1] != ";": # Add terminal semicolon self.queryStr += ";" self.queryHash = hashlib.md5(self.queryStr).hexdigest()[:18] self._dbContext = "LSST" # Later adjusted by hints.
|
query += self._substitution.transform(ref)
|
query += self._substitution.substituteOnly(ref)
|
def _makeChunkQuery(self, chunkId, table): # Prefix with empty subchunk spec. query = self._headerFunc() +"\n" ref = self._pConfig.chunkMapping.getMapReference(chunkId,0) query += self._createTableTmpl % table query += self._substitution.transform(ref) print query return query
|
q = self._substitution.transform(ref) q = self._fixSubChunkDb(q, chunkId, subChunkId)
|
q = self._substitution.transform(ref, chunkId, subChunkId)
|
def _makeSubChunkQuery(self, chunkId, subIter, table): qList = [None] # Include placeholder for header scList = None # Extract list first. if self._isFullSky: scList = [x for x in subIter] else: scList = [sub for (sub, regions) in subIter]
|
if s != "success":
|
resStr = getQueryStateString(s) if resStr != "success":
|
def finish(self): for (k,v) in self._inFlight.items(): s = tryJoinQuery(self._sessionId, v[0]) print "State of", k, "is", getQueryStateString(s)
|
print "Final state of all queries", getQueryStateString(s)
|
print "Final state of all queries", resStr
|
def finish(self): for (k,v) in self._inFlight.items(): s = tryJoinQuery(self._sessionId, v[0]) print "State of", k, "is", getQueryStateString(s)
|
if options.resettables == True: resetTables() return elif options.test == True:
|
if options.test == True:
|
def main(): parser = OptionParser() # Db-backed task tracking is not supported right now. # parser.add_option("--reset-tables", action="store_true", # dest="resettables", default=False, # help="Reset tables instead of starting the server ()") parser.add_option("-t", "--test", action="store_true", dest="test", default=False, help="Run tests instead of starting the server") parser.add_option("-T", dest="testName", default=None, metavar="NAME", help="Run a test named NAME.") parser.add_option("--sanity-client", action="store_true", dest="sanityClient", default=False, help="Sanity-check a running server.") parser.add_option("-c", "--config", dest="configFile", default=None, help="Use config file. Can also be specified with\n" + "%s as an environment variable." % config.envFilenameVar) (options, args) = parser.parse_args() # Modifying options if options.configFile: config.load(options.configFile) else: config.load() print "Configuration:" config.printTo(sys.stdout) if options.resettables == True: resetTables() return elif options.test == True: runParserTest() return elif options.testName: runNamedTest(options.testName) return elif options.sanityClient: client.runSanityClient() return else: server.runServer() return
|
self._brokenChunks.append(p)
|
self._brokenChunks.append(c)
|
def _joinAll(self): for (c,xo) in self.running.items(): xo.join() t = self.running.pop(c) self.finished[c] = t.successful if not xo.successful: print "Unsuccessful with %s on chunk %d" % (self.queryStr, c) self._brokenChunks.append(p) # discard thread object.
|
length = i * nominal
|
length = nominal
|
def _move(self): chunkCount = len(self._chunkFiles) nominal, extra = divmod(chunkCount, self._opts.dirs) print chunkCount, "in dirs with at least", nominal start = 0
|
self._slowDispatchTime = 0.3
|
self._slowDispatchTime = 0.5
|
def __init__(self, query): self.queryStr = query.strip()# Pull trailing whitespace # Force semicolon to facilitate worker-side splitting if self.queryStr[-1] != ";": # Add terminal semicolon self.queryStr += ";" self.queryMunger = None ## sqlparser.QueryMunger() self.db = Persistence() self.running = {} self.resultLock = threading.Lock() self.finished = {} self.threadHighWater = 130 # Python can't make more than 159?? self._slowDispatchTime = 0.3 self._brokenChunks = [] self._coolDownTime = 10 pass
|
triedAgain = False
|
def invoke(self): print "Query invoking..." stats = time.qServQueryTimer[time.qServRunningName] stats["queryActionStart"] = time.time() self.queryMunger = sqlparser.QueryMunger(self.queryStr) # 64bit hash is enough for now(testing). self.queryHash = hashlib.md5(self.queryStr).hexdigest()[:16] self.resultPath = self.setupDumpSaver(self.queryHash) query = self.queryMunger.computePartMapQuery() print "partmapquery is", query p = Persistence() p.activate() stats["partMapPrepStart"] = time.time() chunktuples = p.issueQuery(query) stats["partMapCollectStart"] = time.time() collected = self.queryMunger.collectSubChunkTuples(chunktuples) del chunktuples # Free chunktuples memory stats["partMapCollectFinish"] = time.time() #collected = dict(collected.items()[:5]) ## DEBUG: Force only 3 chunks chunkNums = collected.keys() random.shuffle(chunkNums) # Try to balance among workers. #chunkNums = chunkNums[:200] stats["partMapPrepFinish"] = time.time() createTemplate = "CREATE TABLE IF NOT EXISTS %s ENGINE=MEMORY "; insertTemplate = "INSERT INTO %s "; tableTemplate = "result_%s"; q = "" self.db.activate() # Drop result table to make room. self.applySql("test", "DROP TABLE IF EXISTS result;") for chunk in chunkNums: dispatchStart = time.time() subc = collected[chunk][:2000] # DEBUG: force less subchunks # MySQL will probably run out of memory with >2k subchunks. header = '-- SUBCHUNKS:' + ", ".join(imap(str,subc))
|
|
print "Slow dispatch detected. Draining all queries," self._progressiveJoinAll() print "Cooling down for %d seconds." % self._coolDownTime time.sleep(self._coolDownTime) print "Back to work!"
|
if triedAgain: print "Slow dispatch detected. Draining all queries," self._progressiveJoinAll() print "Cooling down for %d seconds." % self._coolDownTime time.sleep(self._coolDownTime) print "Back to work!" triedAgain = True
|
def invoke(self): print "Query invoking..." stats = time.qServQueryTimer[time.qServRunningName] stats["queryActionStart"] = time.time() self.queryMunger = sqlparser.QueryMunger(self.queryStr) # 64bit hash is enough for now(testing). self.queryHash = hashlib.md5(self.queryStr).hexdigest()[:16] self.resultPath = self.setupDumpSaver(self.queryHash) query = self.queryMunger.computePartMapQuery() print "partmapquery is", query p = Persistence() p.activate() stats["partMapPrepStart"] = time.time() chunktuples = p.issueQuery(query) stats["partMapCollectStart"] = time.time() collected = self.queryMunger.collectSubChunkTuples(chunktuples) del chunktuples # Free chunktuples memory stats["partMapCollectFinish"] = time.time() #collected = dict(collected.items()[:5]) ## DEBUG: Force only 3 chunks chunkNums = collected.keys() random.shuffle(chunkNums) # Try to balance among workers. #chunkNums = chunkNums[:200] stats["partMapPrepFinish"] = time.time() createTemplate = "CREATE TABLE IF NOT EXISTS %s ENGINE=MEMORY "; insertTemplate = "INSERT INTO %s "; tableTemplate = "result_%s"; q = "" self.db.activate() # Drop result table to make room. self.applySql("test", "DROP TABLE IF EXISTS result;") for chunk in chunkNums: dispatchStart = time.time() subc = collected[chunk][:2000] # DEBUG: force less subchunks # MySQL will probably run out of memory with >2k subchunks. header = '-- SUBCHUNKS:' + ", ".join(imap(str,subc))
|
cursor.execute("""INSERT INTO burp(machinename, port, rundate,
|
if aucreated[auid] is not None: cursor.execute("""INSERT INTO burp(machinename, port, rundate,
|
def _article_report(client, db, options): auids, auname = _get_auids(client) host, port = options.host.split(':',1) auyear = {} austatus = {} aucreated = {} aulastcrawlresult = {} aucontentsize = {} audisksize = {} aurepository = {} auarticles = {} cursor = db.cursor() for auid in auids: rerun = True numRuns = 0 while rerun: try: summary, table = client._getStatusTable('ArchivalUnitTable', auid) rerun = False except urllib2.URLError: numRuns += 1 print "%s : _article_report has a URL Error. This is try %d." % (options.host, numRuns) if numRuns > PARAM_REPEAT_GET_STATUS_TABLE: print "Giving up." raise auyear[auid] = summary.get('Year', 0) austatus[auid] = summary.get('Status') aulastcrawlresult[auid] = summary.get('Last Crawl Result', 'n/a') aucontentsize[auid] = summary.get('Content Size') if aucontentsize[auid] <> None: aucontentsize[auid].replace(",", "") else: aucontentsize[auid] = "" audisksize[auid] = summary.get('Disk Usage (MB)', 'n/a') aurepository[auid] = summary.get('Repository') created = summary.get('Created') if created is not None: aucreated[auid] = time.strptime(created, "%H:%M:%S %m/%d/%y") else: print "FAIL: created time was not set.\n" _get_list_articles(client, auid, auarticles) # Because it's hard to know if the Burp is running without SOME feedback... print options.host + ":" + auname[auid] # Note: There is no article iterator for RSC. This is a work-around. if auid.find('ClockssRoyalSocietyOfChemistryPlugin') >= 0 and (options.host.find("ingest") >= 0): _get_list_urls(client, auid, auarticles) cursor.execute("""INSERT INTO burp(machinename, port, rundate,
|
else: cursor.execute("""INSERT INTO burp(machinename, port, rundate, auname, auid, auyear, austatus, aulastcrawlresult, aucontentsize, audisksize, aurepository, numarticles, publisher, created) VALUES ("%s", "%s", NOW(), "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "default", NULL)""" % \ (host, port, auname[auid], auid, auyear[auid], austatus[auid], aulastcrawlresult[auid], aucontentsize[auid], audisksize[auid], aurepository[auid], int(len(auarticles[auid]))))
|
def _article_report(client, db, options): auids, auname = _get_auids(client) host, port = options.host.split(':',1) auyear = {} austatus = {} aucreated = {} aulastcrawlresult = {} aucontentsize = {} audisksize = {} aurepository = {} auarticles = {} cursor = db.cursor() for auid in auids: rerun = True numRuns = 0 while rerun: try: summary, table = client._getStatusTable('ArchivalUnitTable', auid) rerun = False except urllib2.URLError: numRuns += 1 print "%s : _article_report has a URL Error. This is try %d." % (options.host, numRuns) if numRuns > PARAM_REPEAT_GET_STATUS_TABLE: print "Giving up." raise auyear[auid] = summary.get('Year', 0) austatus[auid] = summary.get('Status') aulastcrawlresult[auid] = summary.get('Last Crawl Result', 'n/a') aucontentsize[auid] = summary.get('Content Size') if aucontentsize[auid] <> None: aucontentsize[auid].replace(",", "") else: aucontentsize[auid] = "" audisksize[auid] = summary.get('Disk Usage (MB)', 'n/a') aurepository[auid] = summary.get('Repository') created = summary.get('Created') if created is not None: aucreated[auid] = time.strptime(created, "%H:%M:%S %m/%d/%y") else: print "FAIL: created time was not set.\n" _get_list_articles(client, auid, auarticles) # Because it's hard to know if the Burp is running without SOME feedback... print options.host + ":" + auname[auid] # Note: There is no article iterator for RSC. This is a work-around. if auid.find('ClockssRoyalSocietyOfChemistryPlugin') >= 0 and (options.host.find("ingest") >= 0): _get_list_urls(client, auid, auarticles) cursor.execute("""INSERT INTO burp(machinename, port, rundate,
|
|
return saxutils.escape(str).decode('utf-8').encode('ascii', 'xmlcharrefreplace')
|
return saxutils.escape(str).replace('"', ''').decode('utf-8').encode('ascii', 'xmlcharrefreplace')
|
def __escape(str): from xml.sax import saxutils return saxutils.escape(str).decode('utf-8').encode('ascii', 'xmlcharrefreplace')
|
'outer': '"' if current_pub.name().find('\'') < 0 else '\'', 'inner': '\'' if current_pub.name().find('\'') < 0 else '"' }
|
'outer': '"' if "'" not in current_pub.name() else "'", 'inner': "'" if "'" not in current_pub.name() else '"' }
|
def __process(tdb, options): current_pub = None if options.style == TdbxmlConstants.OPTION_STYLE_LEGACY: print '''\ <property name="org.lockss.title">
|
if options.headings: result = [[w.capitalize() for w in fields]] + result
|
if options.names: result = [[w.capitalize() for w in fields]] + result
|
def __process(tdb, options): fields = options.fields.split(',') result = [[lam(au) or '' for lam in map(tdbq.str_to_lambda_au, fields)] for au in tdb.aus()] if options.style == TdboutConstants.OPTION_STYLE_CSV: import csv if options.headings: result = [[w.capitalize() for w in fields]] + result if options.warnings: from datetime import date warnings = [['Current as of %s' % (date.today())], ['Subject to change without notice']] result = warnings + [[]] + result + [[]] + warnings writer = csv.writer(sys.stdout, dialect='excel') for lst in result: writer.writerow(lst) else: for lst in result: print '\t'.join(lst)
|
fileInconsistent.write("\n")
|
fileInconsistent.write("\n\n")
|
def _find_inconsistent_information(db, options): fileInconsistent = open(options.inconsistent, 'w') isBlankReport = True cursor = db.cursor() cursorMachine = db.cursor() cursor2 = db.cursor() cursor3 = db.cursor() print("Looking for inconsistent information within one run.\n") cursor.execute("SELECT auid, auname, max(numarticles) FROM burp WHERE rundate >= '%s' AND rundate <= '%s' GROUP BY auid, auname ORDER BY auname;" % (str(options.reportdatestart), str(options.reportdateend))) arAuid = cursor.fetchone() while arAuid is not None: if _is_reported(arAuid[0]): # Verify that the years and names remain consistent across AUs. cursor2.execute("SELECT COUNT(DISTINCT(auyear)), COUNT(DISTINCT(auname)) FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") countInformation = cursor2.fetchone() if countInformation[0] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent years: \n") cursor3.execute("SELECT DISTINCT(auyear), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") year = cursor3.fetchone() while year is not None: fileInconsistent.write("%s (on %s:%d) " % (year[0], year[1], year[2])) year = cursor3.fetchone() fileInconsistent.write("\n\n") isBlankReport = False if countInformation[1] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent AU Names: \n") cursor3.execute("SELECT DISTINCT(auname), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") name = cursor3.fetchone() while name is not None: fileInconsistent.write("`%s' (on %s:%d) " % (name[0], name[1], name[2])) name = cursor3.fetchone() fileInconsistent.write("\n\n") isBlankReport = False # Verify that no articles have had a successful crawl, but still have zero 
DOIs reported. cursor2.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' AND numarticles = 0 AND aulastcrawlresult = 'successful' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) crawledbutzero = cursor2.fetchone() crawledbutzeroflag = True while crawledbutzero is not None: if crawledbutzeroflag: fileInconsistent.write("`%s' had a successful crawl, but still has zero DOIs reported. Machines it occurred on: " % (arAuid[1],)) crawledbutzeroflag = False isBlankReport = False fileInconsistent.write("%s:%d " %(crawledbutzero[0], crawledbutzero[1])) crawledbutzero = cursor2.fetchone() if not crawledbutzeroflag: fileInconsistent.write("\n") # Verify that zero DOIs have not been waiting for too long. cursor2.execute("SELECT machinename, port, created FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' AND numarticles = 0 AND aulastcrawlresult != 'successful' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) notcrawled = cursor2.fetchone() while notcrawled is not None and notcrawled[2] is not None: timesincecreated = datetime.now() - notcrawled[2]; if timesincecreated > datetime.timedelta(OPTION_DAYS_WITHOUT_CRAWL_DEFAULT): fileInconsistent.write("`%s' has been waiting too long for a successful crawl on %s:%d." % (arAuid[1], notcrawled[0], notcrawled[1])) isBlankReport = False notcrawled = cursor2.fetchone() # Verify that the current article on one machine does not have fewer articles than any previous run. 
cursorMachine.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: cursor2.execute("SELECT numarticles FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], arMachineName[0], arMachineName[1], str(options.reportdatestart), str(options.reportdateend))) currentNumArticles = cursor2.fetchone() cursor3.execute("SELECT MAX(numarticles), rundate FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d;" % (arAuid[0], arMachineName[0], arMachineName[1])) bestNumArticles = cursor3.fetchone() # This message should only output if we haven't already reported that it's zero. if currentNumArticles[0] < bestNumArticles[0] and currentNumArticles[0] > 0: fileInconsistent.write("`%s' (on %s:%d) has seen its number of articles decrease to %d (current run) from %d (on %s).\n" %(arAuid[1], arMachineName[0], arMachineName[1], currentNumArticles[0], bestNumArticles[0], bestNumArticles[1])) isBlankReport = False arMachineName = cursorMachine.fetchone() # Verify that the maximum number of articles is not significantly greater than the number of articles on any machine. cursorMachine.execute("SELECT numarticles, machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arNumArticles = cursorMachine.fetchone() while arNumArticles is not None: if arNumArticles[0] + options.maxarticlediff < arAuid[2] and arNumArticles[0] > 0: fileInconsistent.write("'%s' (on %s:%d) has significantly fewer articles than the maximum. 
It has %d articles, and the maximum is %d.\n" % (arAuid[1], arNumArticles[1], arNumArticles[2], arNumArticles[0], arAuid[2])) isBlankReport = False arNumArticles = cursorMachine.fetchone() arAuid = cursor.fetchone() # Problems within the burp report (comparing reports, report by report.) print("Testing problems within report.\n") cursor.execute("SELECT publisher, auyear FROM burpreport WHERE auyear != '0' GROUP BY publisher, auyear;") arPublisherYear = cursor.fetchone() while arPublisherYear is not None: cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND auyear = '%s'" % (arPublisherYear[0], arPublisherYear[1])) highestInYear = cursor2.fetchone() cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND auyear = '%s' ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1])) mostRecent = cursor3.fetchone() if mostRecent[0] < highestInYear[0]: fileInconsistent.write("Publisher %s in year %s has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent[0])) isBlankReport = False arPublisherYear = cursor.fetchone() # List all AUs that have disappeared since the previous run. 
print("Listing disappearing AUs.\n") cursor.execute("SELECT auid, auname, machinename, port FROM burp WHERE rundate >= '%s' AND rundate <= '%s' GROUP BY auid, auname ORDER BY auname;" % (str(options.previousreportdatestart), str(options.previousreportdateend))) arPreviousAu = cursor.fetchone() while arPreviousAu is not None: cursor2.execute("SELECT auname FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s'" % (arPreviousAu[0], arPreviousAu[2], arPreviousAu[3], str(options.reportdatestart), str(options.reportdateend))) arFound = cursor.fetchone if arFound is None: fileInconsistent.write("The AU '%s' was known by %s:%d in the previous run, but not in the current run." % (arPreviousAu[1], arPreviousAu[2], arPreviousAu[3])) isBlankReport = False arPreviousAu = cursor.fetchone() if isBlankReport: fileInconsistent.write("Congratulations: no AU has inconsistent data!\n") fileInconsistent.close() cursor3.close() cursor2.close() cursor.close()
|
timesincecreated = datetime.now() - notcrawled[2]; if timesincecreated > datetime.timedelta(OPTION_DAYS_WITHOUT_CRAWL_DEFAULT): fileInconsistent.write("`%s' has been waiting too long for a successful crawl on %s:%d." %
|
timeSinceCreated = datetime.now() - notcrawled[2]; if timeSinceCreated > timedelta(OPTION_DAYS_WITHOUT_CRAWL_DEFAULT): fileInconsistent.write("`%s' has been waiting too long for a successful crawl on %s:%d.\n" %
|
def _find_inconsistent_information(db, options): fileInconsistent = open(options.inconsistent, 'w') isBlankReport = True cursor = db.cursor() cursorMachine = db.cursor() cursor2 = db.cursor() cursor3 = db.cursor() print("Looking for inconsistent information within one run.\n") cursor.execute("SELECT auid, auname, max(numarticles) FROM burp WHERE rundate >= '%s' AND rundate <= '%s' GROUP BY auid, auname ORDER BY auname;" % (str(options.reportdatestart), str(options.reportdateend))) arAuid = cursor.fetchone() while arAuid is not None: if _is_reported(arAuid[0]): # Verify that the years and names remain consistent across AUs. cursor2.execute("SELECT COUNT(DISTINCT(auyear)), COUNT(DISTINCT(auname)) FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") countInformation = cursor2.fetchone() if countInformation[0] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent years: \n") cursor3.execute("SELECT DISTINCT(auyear), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") year = cursor3.fetchone() while year is not None: fileInconsistent.write("%s (on %s:%d) " % (year[0], year[1], year[2])) year = cursor3.fetchone() fileInconsistent.write("\n\n") isBlankReport = False if countInformation[1] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent AU Names: \n") cursor3.execute("SELECT DISTINCT(auname), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") name = cursor3.fetchone() while name is not None: fileInconsistent.write("`%s' (on %s:%d) " % (name[0], name[1], name[2])) name = cursor3.fetchone() fileInconsistent.write("\n\n") isBlankReport = False # Verify that no articles have had a successful crawl, but still have zero 
DOIs reported. cursor2.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' AND numarticles = 0 AND aulastcrawlresult = 'successful' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) crawledbutzero = cursor2.fetchone() crawledbutzeroflag = True while crawledbutzero is not None: if crawledbutzeroflag: fileInconsistent.write("`%s' had a successful crawl, but still has zero DOIs reported. Machines it occurred on: " % (arAuid[1],)) crawledbutzeroflag = False isBlankReport = False fileInconsistent.write("%s:%d " %(crawledbutzero[0], crawledbutzero[1])) crawledbutzero = cursor2.fetchone() if not crawledbutzeroflag: fileInconsistent.write("\n") # Verify that zero DOIs have not been waiting for too long. cursor2.execute("SELECT machinename, port, created FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' AND numarticles = 0 AND aulastcrawlresult != 'successful' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) notcrawled = cursor2.fetchone() while notcrawled is not None and notcrawled[2] is not None: timesincecreated = datetime.now() - notcrawled[2]; if timesincecreated > datetime.timedelta(OPTION_DAYS_WITHOUT_CRAWL_DEFAULT): fileInconsistent.write("`%s' has been waiting too long for a successful crawl on %s:%d." % (arAuid[1], notcrawled[0], notcrawled[1])) isBlankReport = False notcrawled = cursor2.fetchone() # Verify that the current article on one machine does not have fewer articles than any previous run. 
cursorMachine.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: cursor2.execute("SELECT numarticles FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], arMachineName[0], arMachineName[1], str(options.reportdatestart), str(options.reportdateend))) currentNumArticles = cursor2.fetchone() cursor3.execute("SELECT MAX(numarticles), rundate FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d;" % (arAuid[0], arMachineName[0], arMachineName[1])) bestNumArticles = cursor3.fetchone() # This message should only output if we haven't already reported that it's zero. if currentNumArticles[0] < bestNumArticles[0] and currentNumArticles[0] > 0: fileInconsistent.write("`%s' (on %s:%d) has seen its number of articles decrease to %d (current run) from %d (on %s).\n" %(arAuid[1], arMachineName[0], arMachineName[1], currentNumArticles[0], bestNumArticles[0], bestNumArticles[1])) isBlankReport = False arMachineName = cursorMachine.fetchone() # Verify that the maximum number of articles is not significantly greater than the number of articles on any machine. cursorMachine.execute("SELECT numarticles, machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arNumArticles = cursorMachine.fetchone() while arNumArticles is not None: if arNumArticles[0] + options.maxarticlediff < arAuid[2] and arNumArticles[0] > 0: fileInconsistent.write("'%s' (on %s:%d) has significantly fewer articles than the maximum. 
It has %d articles, and the maximum is %d.\n" % (arAuid[1], arNumArticles[1], arNumArticles[2], arNumArticles[0], arAuid[2])) isBlankReport = False arNumArticles = cursorMachine.fetchone() arAuid = cursor.fetchone() # Problems within the burp report (comparing reports, report by report.) print("Testing problems within report.\n") cursor.execute("SELECT publisher, auyear FROM burpreport WHERE auyear != '0' GROUP BY publisher, auyear;") arPublisherYear = cursor.fetchone() while arPublisherYear is not None: cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND auyear = '%s'" % (arPublisherYear[0], arPublisherYear[1])) highestInYear = cursor2.fetchone() cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND auyear = '%s' ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1])) mostRecent = cursor3.fetchone() if mostRecent[0] < highestInYear[0]: fileInconsistent.write("Publisher %s in year %s has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent[0])) isBlankReport = False arPublisherYear = cursor.fetchone() # List all AUs that have disappeared since the previous run. 
print("Listing disappearing AUs.\n") cursor.execute("SELECT auid, auname, machinename, port FROM burp WHERE rundate >= '%s' AND rundate <= '%s' GROUP BY auid, auname ORDER BY auname;" % (str(options.previousreportdatestart), str(options.previousreportdateend))) arPreviousAu = cursor.fetchone() while arPreviousAu is not None: cursor2.execute("SELECT auname FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s'" % (arPreviousAu[0], arPreviousAu[2], arPreviousAu[3], str(options.reportdatestart), str(options.reportdateend))) arFound = cursor.fetchone if arFound is None: fileInconsistent.write("The AU '%s' was known by %s:%d in the previous run, but not in the current run." % (arPreviousAu[1], arPreviousAu[2], arPreviousAu[3])) isBlankReport = False arPreviousAu = cursor.fetchone() if isBlankReport: fileInconsistent.write("Congratulations: no AU has inconsistent data!\n") fileInconsistent.close() cursor3.close() cursor2.close() cursor.close()
|
fileInconsistent.write("The AU '%s' was known by %s:%d in the previous run, but not in the current run." %
|
fileInconsistent.write("The AU '%s' was known by %s:%d in the previous run, but not in the current run.\n" %
|
def _find_inconsistent_information(db, options): fileInconsistent = open(options.inconsistent, 'w') isBlankReport = True cursor = db.cursor() cursorMachine = db.cursor() cursor2 = db.cursor() cursor3 = db.cursor() print("Looking for inconsistent information within one run.\n") cursor.execute("SELECT auid, auname, max(numarticles) FROM burp WHERE rundate >= '%s' AND rundate <= '%s' GROUP BY auid, auname ORDER BY auname;" % (str(options.reportdatestart), str(options.reportdateend))) arAuid = cursor.fetchone() while arAuid is not None: if _is_reported(arAuid[0]): # Verify that the years and names remain consistent across AUs. cursor2.execute("SELECT COUNT(DISTINCT(auyear)), COUNT(DISTINCT(auname)) FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") countInformation = cursor2.fetchone() if countInformation[0] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent years: \n") cursor3.execute("SELECT DISTINCT(auyear), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") year = cursor3.fetchone() while year is not None: fileInconsistent.write("%s (on %s:%d) " % (year[0], year[1], year[2])) year = cursor3.fetchone() fileInconsistent.write("\n\n") isBlankReport = False if countInformation[1] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent AU Names: \n") cursor3.execute("SELECT DISTINCT(auname), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") name = cursor3.fetchone() while name is not None: fileInconsistent.write("`%s' (on %s:%d) " % (name[0], name[1], name[2])) name = cursor3.fetchone() fileInconsistent.write("\n\n") isBlankReport = False # Verify that no articles have had a successful crawl, but still have zero 
DOIs reported. cursor2.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' AND numarticles = 0 AND aulastcrawlresult = 'successful' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) crawledbutzero = cursor2.fetchone() crawledbutzeroflag = True while crawledbutzero is not None: if crawledbutzeroflag: fileInconsistent.write("`%s' had a successful crawl, but still has zero DOIs reported. Machines it occurred on: " % (arAuid[1],)) crawledbutzeroflag = False isBlankReport = False fileInconsistent.write("%s:%d " %(crawledbutzero[0], crawledbutzero[1])) crawledbutzero = cursor2.fetchone() if not crawledbutzeroflag: fileInconsistent.write("\n") # Verify that zero DOIs have not been waiting for too long. cursor2.execute("SELECT machinename, port, created FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' AND numarticles = 0 AND aulastcrawlresult != 'successful' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) notcrawled = cursor2.fetchone() while notcrawled is not None and notcrawled[2] is not None: timesincecreated = datetime.now() - notcrawled[2]; if timesincecreated > datetime.timedelta(OPTION_DAYS_WITHOUT_CRAWL_DEFAULT): fileInconsistent.write("`%s' has been waiting too long for a successful crawl on %s:%d." % (arAuid[1], notcrawled[0], notcrawled[1])) isBlankReport = False notcrawled = cursor2.fetchone() # Verify that the current article on one machine does not have fewer articles than any previous run. 
cursorMachine.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: cursor2.execute("SELECT numarticles FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], arMachineName[0], arMachineName[1], str(options.reportdatestart), str(options.reportdateend))) currentNumArticles = cursor2.fetchone() cursor3.execute("SELECT MAX(numarticles), rundate FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d;" % (arAuid[0], arMachineName[0], arMachineName[1])) bestNumArticles = cursor3.fetchone() # This message should only output if we haven't already reported that it's zero. if currentNumArticles[0] < bestNumArticles[0] and currentNumArticles[0] > 0: fileInconsistent.write("`%s' (on %s:%d) has seen its number of articles decrease to %d (current run) from %d (on %s).\n" %(arAuid[1], arMachineName[0], arMachineName[1], currentNumArticles[0], bestNumArticles[0], bestNumArticles[1])) isBlankReport = False arMachineName = cursorMachine.fetchone() # Verify that the maximum number of articles is not significantly greater than the number of articles on any machine. cursorMachine.execute("SELECT numarticles, machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arNumArticles = cursorMachine.fetchone() while arNumArticles is not None: if arNumArticles[0] + options.maxarticlediff < arAuid[2] and arNumArticles[0] > 0: fileInconsistent.write("'%s' (on %s:%d) has significantly fewer articles than the maximum. 
It has %d articles, and the maximum is %d.\n" % (arAuid[1], arNumArticles[1], arNumArticles[2], arNumArticles[0], arAuid[2])) isBlankReport = False arNumArticles = cursorMachine.fetchone() arAuid = cursor.fetchone() # Problems within the burp report (comparing reports, report by report.) print("Testing problems within report.\n") cursor.execute("SELECT publisher, auyear FROM burpreport WHERE auyear != '0' GROUP BY publisher, auyear;") arPublisherYear = cursor.fetchone() while arPublisherYear is not None: cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND auyear = '%s'" % (arPublisherYear[0], arPublisherYear[1])) highestInYear = cursor2.fetchone() cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND auyear = '%s' ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1])) mostRecent = cursor3.fetchone() if mostRecent[0] < highestInYear[0]: fileInconsistent.write("Publisher %s in year %s has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent[0])) isBlankReport = False arPublisherYear = cursor.fetchone() # List all AUs that have disappeared since the previous run. 
print("Listing disappearing AUs.\n") cursor.execute("SELECT auid, auname, machinename, port FROM burp WHERE rundate >= '%s' AND rundate <= '%s' GROUP BY auid, auname ORDER BY auname;" % (str(options.previousreportdatestart), str(options.previousreportdateend))) arPreviousAu = cursor.fetchone() while arPreviousAu is not None: cursor2.execute("SELECT auname FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s'" % (arPreviousAu[0], arPreviousAu[2], arPreviousAu[3], str(options.reportdatestart), str(options.reportdateend))) arFound = cursor.fetchone if arFound is None: fileInconsistent.write("The AU '%s' was known by %s:%d in the previous run, but not in the current run." % (arPreviousAu[1], arPreviousAu[2], arPreviousAu[3])) isBlankReport = False arPreviousAu = cursor.fetchone() if isBlankReport: fileInconsistent.write("Congratulations: no AU has inconsistent data!\n") fileInconsistent.close() cursor3.close() cursor2.close() cursor.close()
|
<property name="xpath" value="[attributes/publisher='%(publisher2)s']" />
|
<property name="xpath" value=%(outer)s[attributes/publisher=%(inner)s%(publisher2)s%(inner)s]%(outer)s />
|
def _process(tdb, options): current_pub = None if options.style == TDB_STYLE_XML_LEGACY: print '''\ <property name="org.lockss.title">
|
'publisher2': re.sub(r'\'', ''', _escape(current_pub.name())) }
|
'publisher2': re.sub(r'\'', ''', _escape(current_pub.name())), 'outer': '"' if current_pub.name().find('\'') < 0 else '\'', 'inner': '\'' if current_pub.name().find('\'') < 0 else '"' }
|
def _process(tdb, options): current_pub = None if options.style == TDB_STYLE_XML_LEGACY: print '''\ <property name="org.lockss.title">
|
''' % { 'publisher': __escape(current_pub.name()),
|
''' % { 'publisher': __escape(current_pub.name().replace('.', '')),
|
def __process(tdb, options): current_pub = None if options.style == TdbxmlConstants.OPTION_STYLE_LEGACY: print '''\ <property name="org.lockss.title">
|
cursor.execute("SELECT publisher, auyear FROM burpreport group by publisher, auyear;")
|
cursor.execute("SELECT publisher, auyear FROM burpreport WHERE auyear != '0' GROUP BY publisher, auyear;")
|
def _find_inconsistent_information(db, options): fileInconsistent = open(options.inconsistent, 'w') isEmpty = True cursor = db.cursor() cursorMachine = db.cursor() cursor2 = db.cursor() cursor3 = db.cursor() cursor.execute("SELECT auid, auname FROM burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' GROUP BY auid, auname ORDER BY auname;") arAuid = cursor.fetchone() while arAuid is not None: if _is_reported(arAuid[0]): print("Testing '%s'" % (arAuid[1])) # Verify that the years and names remain consistent across AUs. cursor2.execute("SELECT COUNT(DISTINCT(auyear)), COUNT(DISTINCT(auname)) FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") countInformation = cursor2.fetchone() if countInformation[0] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent years: \n") cursor3.execute("SELECT DISTINCT(auyear), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") year = cursor3.fetchone() while year is not None: fileInconsistent.write("%s (on %s:%d) " % (year[0], year[1], year[2])) year = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False if countInformation[1] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent AU Names: \n") cursor3.execute("SELECT DISTINCT(auname), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") name = cursor3.fetchone() while name is not None: fileInconsistent.write("`%s' (on %s:%d) " % (name[0], name[1], name[2])) name = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False # Verify that no articles have had a successful crawl, but still have zero DOIs reported. 
cursor2.execute("SELECT aulastcrawlresult, numarticles FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) crawledbutzero = cursor2.fetchone() if crawledbutzero is not None: if crawledbutzero[0] == "Successful" and crawledbutzero[1] == 0: fileInconsistent.write("`%s' had a successful crawl, but still has zero DOIs reported. Machines it occurred on: " % (arAuid[1],)) isEmpty = False cursorMachine.execute("SELECT machinename, port FROM burp where auid = '%s' AND aulastcrawlresult = 'Successful' AND numarticles = 0 AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: fileInconsistent.write("%s:%d " %(arMachineName[0], arMachineName[1])) arMachineName = cursorMachine.fetchone() fileInconsistent.write("\n") # Verify that the current article on one machine does not have fewer articles than any previous run. cursorMachine.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: cursor2.execute("SELECT numarticles FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], arMachineName[0], arMachineName[1], str(options.reportdatestart), str(options.reportdateend))) currentNumArticles = cursor2.fetchone() cursor3.execute("SELECT MAX(numarticles), rundate FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d;" % (arAuid[0], arMachineName[0], arMachineName[1])) bestNumArticles = cursor3.fetchone() # This message should only output if we haven't already reported that it's zero. 
if currentNumArticles[0] < bestNumArticles[0] and currentNumArticles[0] > 0: fileInconsistent.write("`%s' (on %s:%d) has seen its number of articles decrease to %d (current run) from %d (on %s).\n" %(arAuid[1], arMachineName[0], arMachineName[1], currentNumArticles[0], bestNumArticles[0], bestNumArticles[1])) isEmpty = False arMachineName = cursorMachine.fetchone() arAuid = cursor.fetchone() # Report problems... print("Testing problems within report.\n") cursor.execute("SELECT publisher, auyear FROM burpreport group by publisher, auyear;") arPublisherYear = cursor.fetchone() while arPublisherYear is not None: cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND year = %d" % (arPublisherYear[0], arPublisherYear[1])) highestInYear = cursor2.fetchone() cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND year = %d ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1])) mostRecent = cursor3.fetchone() if mostRecent[0] < highestInYear[0]: fileInconsistent.write("Publisher %s in year %d has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent)) isEmpty = False arPublisherYear = cursor.fetchone() if isEmpty: fileInconsistent.write("Congratulations: no AU has inconsistent data!") fileInconsistent.close() cursor3.close() cursor2.close() cursor.close()
|
cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND year = %d" % (arPublisherYear[0], arPublisherYear[1]))
|
cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND auyear = '%s'" % (arPublisherYear[0], arPublisherYear[1]))
|
def _find_inconsistent_information(db, options): fileInconsistent = open(options.inconsistent, 'w') isEmpty = True cursor = db.cursor() cursorMachine = db.cursor() cursor2 = db.cursor() cursor3 = db.cursor() cursor.execute("SELECT auid, auname FROM burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' GROUP BY auid, auname ORDER BY auname;") arAuid = cursor.fetchone() while arAuid is not None: if _is_reported(arAuid[0]): print("Testing '%s'" % (arAuid[1])) # Verify that the years and names remain consistent across AUs. cursor2.execute("SELECT COUNT(DISTINCT(auyear)), COUNT(DISTINCT(auname)) FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") countInformation = cursor2.fetchone() if countInformation[0] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent years: \n") cursor3.execute("SELECT DISTINCT(auyear), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") year = cursor3.fetchone() while year is not None: fileInconsistent.write("%s (on %s:%d) " % (year[0], year[1], year[2])) year = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False if countInformation[1] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent AU Names: \n") cursor3.execute("SELECT DISTINCT(auname), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") name = cursor3.fetchone() while name is not None: fileInconsistent.write("`%s' (on %s:%d) " % (name[0], name[1], name[2])) name = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False # Verify that no articles have had a successful crawl, but still have zero DOIs reported. 
cursor2.execute("SELECT aulastcrawlresult, numarticles FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) crawledbutzero = cursor2.fetchone() if crawledbutzero is not None: if crawledbutzero[0] == "Successful" and crawledbutzero[1] == 0: fileInconsistent.write("`%s' had a successful crawl, but still has zero DOIs reported. Machines it occurred on: " % (arAuid[1],)) isEmpty = False cursorMachine.execute("SELECT machinename, port FROM burp where auid = '%s' AND aulastcrawlresult = 'Successful' AND numarticles = 0 AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: fileInconsistent.write("%s:%d " %(arMachineName[0], arMachineName[1])) arMachineName = cursorMachine.fetchone() fileInconsistent.write("\n") # Verify that the current article on one machine does not have fewer articles than any previous run. cursorMachine.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: cursor2.execute("SELECT numarticles FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], arMachineName[0], arMachineName[1], str(options.reportdatestart), str(options.reportdateend))) currentNumArticles = cursor2.fetchone() cursor3.execute("SELECT MAX(numarticles), rundate FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d;" % (arAuid[0], arMachineName[0], arMachineName[1])) bestNumArticles = cursor3.fetchone() # This message should only output if we haven't already reported that it's zero. 
if currentNumArticles[0] < bestNumArticles[0] and currentNumArticles[0] > 0: fileInconsistent.write("`%s' (on %s:%d) has seen its number of articles decrease to %d (current run) from %d (on %s).\n" %(arAuid[1], arMachineName[0], arMachineName[1], currentNumArticles[0], bestNumArticles[0], bestNumArticles[1])) isEmpty = False arMachineName = cursorMachine.fetchone() arAuid = cursor.fetchone() # Report problems... print("Testing problems within report.\n") cursor.execute("SELECT publisher, auyear FROM burpreport group by publisher, auyear;") arPublisherYear = cursor.fetchone() while arPublisherYear is not None: cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND year = %d" % (arPublisherYear[0], arPublisherYear[1])) highestInYear = cursor2.fetchone() cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND year = %d ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1])) mostRecent = cursor3.fetchone() if mostRecent[0] < highestInYear[0]: fileInconsistent.write("Publisher %s in year %d has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent)) isEmpty = False arPublisherYear = cursor.fetchone() if isEmpty: fileInconsistent.write("Congratulations: no AU has inconsistent data!") fileInconsistent.close() cursor3.close() cursor2.close() cursor.close()
|
cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND year = %d ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1]))
|
cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND auyear = '%s' ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1]))
|
def _find_inconsistent_information(db, options): fileInconsistent = open(options.inconsistent, 'w') isEmpty = True cursor = db.cursor() cursorMachine = db.cursor() cursor2 = db.cursor() cursor3 = db.cursor() cursor.execute("SELECT auid, auname FROM burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' GROUP BY auid, auname ORDER BY auname;") arAuid = cursor.fetchone() while arAuid is not None: if _is_reported(arAuid[0]): print("Testing '%s'" % (arAuid[1])) # Verify that the years and names remain consistent across AUs. cursor2.execute("SELECT COUNT(DISTINCT(auyear)), COUNT(DISTINCT(auname)) FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") countInformation = cursor2.fetchone() if countInformation[0] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent years: \n") cursor3.execute("SELECT DISTINCT(auyear), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") year = cursor3.fetchone() while year is not None: fileInconsistent.write("%s (on %s:%d) " % (year[0], year[1], year[2])) year = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False if countInformation[1] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent AU Names: \n") cursor3.execute("SELECT DISTINCT(auname), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") name = cursor3.fetchone() while name is not None: fileInconsistent.write("`%s' (on %s:%d) " % (name[0], name[1], name[2])) name = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False # Verify that no articles have had a successful crawl, but still have zero DOIs reported. 
cursor2.execute("SELECT aulastcrawlresult, numarticles FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) crawledbutzero = cursor2.fetchone() if crawledbutzero is not None: if crawledbutzero[0] == "Successful" and crawledbutzero[1] == 0: fileInconsistent.write("`%s' had a successful crawl, but still has zero DOIs reported. Machines it occurred on: " % (arAuid[1],)) isEmpty = False cursorMachine.execute("SELECT machinename, port FROM burp where auid = '%s' AND aulastcrawlresult = 'Successful' AND numarticles = 0 AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: fileInconsistent.write("%s:%d " %(arMachineName[0], arMachineName[1])) arMachineName = cursorMachine.fetchone() fileInconsistent.write("\n") # Verify that the current article on one machine does not have fewer articles than any previous run. cursorMachine.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: cursor2.execute("SELECT numarticles FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], arMachineName[0], arMachineName[1], str(options.reportdatestart), str(options.reportdateend))) currentNumArticles = cursor2.fetchone() cursor3.execute("SELECT MAX(numarticles), rundate FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d;" % (arAuid[0], arMachineName[0], arMachineName[1])) bestNumArticles = cursor3.fetchone() # This message should only output if we haven't already reported that it's zero. 
if currentNumArticles[0] < bestNumArticles[0] and currentNumArticles[0] > 0: fileInconsistent.write("`%s' (on %s:%d) has seen its number of articles decrease to %d (current run) from %d (on %s).\n" %(arAuid[1], arMachineName[0], arMachineName[1], currentNumArticles[0], bestNumArticles[0], bestNumArticles[1])) isEmpty = False arMachineName = cursorMachine.fetchone() arAuid = cursor.fetchone() # Report problems... print("Testing problems within report.\n") cursor.execute("SELECT publisher, auyear FROM burpreport group by publisher, auyear;") arPublisherYear = cursor.fetchone() while arPublisherYear is not None: cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND year = %d" % (arPublisherYear[0], arPublisherYear[1])) highestInYear = cursor2.fetchone() cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND year = %d ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1])) mostRecent = cursor3.fetchone() if mostRecent[0] < highestInYear[0]: fileInconsistent.write("Publisher %s in year %d has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent)) isEmpty = False arPublisherYear = cursor.fetchone() if isEmpty: fileInconsistent.write("Congratulations: no AU has inconsistent data!") fileInconsistent.close() cursor3.close() cursor2.close() cursor.close()
|
fileInconsistent.write("Publisher %s in year %d has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent))
|
fileInconsistent.write("Publisher %s in year %s has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent[0]))
|
def _find_inconsistent_information(db, options): fileInconsistent = open(options.inconsistent, 'w') isEmpty = True cursor = db.cursor() cursorMachine = db.cursor() cursor2 = db.cursor() cursor3 = db.cursor() cursor.execute("SELECT auid, auname FROM burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' GROUP BY auid, auname ORDER BY auname;") arAuid = cursor.fetchone() while arAuid is not None: if _is_reported(arAuid[0]): print("Testing '%s'" % (arAuid[1])) # Verify that the years and names remain consistent across AUs. cursor2.execute("SELECT COUNT(DISTINCT(auyear)), COUNT(DISTINCT(auname)) FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") countInformation = cursor2.fetchone() if countInformation[0] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent years: \n") cursor3.execute("SELECT DISTINCT(auyear), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") year = cursor3.fetchone() while year is not None: fileInconsistent.write("%s (on %s:%d) " % (year[0], year[1], year[2])) year = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False if countInformation[1] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent AU Names: \n") cursor3.execute("SELECT DISTINCT(auname), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") name = cursor3.fetchone() while name is not None: fileInconsistent.write("`%s' (on %s:%d) " % (name[0], name[1], name[2])) name = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False # Verify that no articles have had a successful crawl, but still have zero DOIs reported. 
cursor2.execute("SELECT aulastcrawlresult, numarticles FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) crawledbutzero = cursor2.fetchone() if crawledbutzero is not None: if crawledbutzero[0] == "Successful" and crawledbutzero[1] == 0: fileInconsistent.write("`%s' had a successful crawl, but still has zero DOIs reported. Machines it occurred on: " % (arAuid[1],)) isEmpty = False cursorMachine.execute("SELECT machinename, port FROM burp where auid = '%s' AND aulastcrawlresult = 'Successful' AND numarticles = 0 AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: fileInconsistent.write("%s:%d " %(arMachineName[0], arMachineName[1])) arMachineName = cursorMachine.fetchone() fileInconsistent.write("\n") # Verify that the current article on one machine does not have fewer articles than any previous run. cursorMachine.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: cursor2.execute("SELECT numarticles FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], arMachineName[0], arMachineName[1], str(options.reportdatestart), str(options.reportdateend))) currentNumArticles = cursor2.fetchone() cursor3.execute("SELECT MAX(numarticles), rundate FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d;" % (arAuid[0], arMachineName[0], arMachineName[1])) bestNumArticles = cursor3.fetchone() # This message should only output if we haven't already reported that it's zero. 
if currentNumArticles[0] < bestNumArticles[0] and currentNumArticles[0] > 0: fileInconsistent.write("`%s' (on %s:%d) has seen its number of articles decrease to %d (current run) from %d (on %s).\n" %(arAuid[1], arMachineName[0], arMachineName[1], currentNumArticles[0], bestNumArticles[0], bestNumArticles[1])) isEmpty = False arMachineName = cursorMachine.fetchone() arAuid = cursor.fetchone() # Report problems... print("Testing problems within report.\n") cursor.execute("SELECT publisher, auyear FROM burpreport group by publisher, auyear;") arPublisherYear = cursor.fetchone() while arPublisherYear is not None: cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND year = %d" % (arPublisherYear[0], arPublisherYear[1])) highestInYear = cursor2.fetchone() cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND year = %d ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1])) mostRecent = cursor3.fetchone() if mostRecent[0] < highestInYear[0]: fileInconsistent.write("Publisher %s in year %d has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent)) isEmpty = False arPublisherYear = cursor.fetchone() if isEmpty: fileInconsistent.write("Congratulations: no AU has inconsistent data!") fileInconsistent.close() cursor3.close() cursor2.close() cursor.close()
|
fileInconsistent.write("Congratulations: no AU has inconsistent data!")
|
fileInconsistent.write("Congratulations: no AU has inconsistent data!\n")
|
def _find_inconsistent_information(db, options): fileInconsistent = open(options.inconsistent, 'w') isEmpty = True cursor = db.cursor() cursorMachine = db.cursor() cursor2 = db.cursor() cursor3 = db.cursor() cursor.execute("SELECT auid, auname FROM burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' GROUP BY auid, auname ORDER BY auname;") arAuid = cursor.fetchone() while arAuid is not None: if _is_reported(arAuid[0]): print("Testing '%s'" % (arAuid[1])) # Verify that the years and names remain consistent across AUs. cursor2.execute("SELECT COUNT(DISTINCT(auyear)), COUNT(DISTINCT(auname)) FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") countInformation = cursor2.fetchone() if countInformation[0] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent years: \n") cursor3.execute("SELECT DISTINCT(auyear), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") year = cursor3.fetchone() while year is not None: fileInconsistent.write("%s (on %s:%d) " % (year[0], year[1], year[2])) year = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False if countInformation[1] > 1: fileInconsistent.write("`" + arAuid[1] + "' has inconsistent AU Names: \n") cursor3.execute("SELECT DISTINCT(auname), machinename, port FROM burp WHERE auid = '" + arAuid[0] + "' AND rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "';") name = cursor3.fetchone() while name is not None: fileInconsistent.write("`%s' (on %s:%d) " % (name[0], name[1], name[2])) name = cursor3.fetchone() fileInconsistent.write("\n\n") isEmpty = False # Verify that no articles have had a successful crawl, but still have zero DOIs reported. 
cursor2.execute("SELECT aulastcrawlresult, numarticles FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) crawledbutzero = cursor2.fetchone() if crawledbutzero is not None: if crawledbutzero[0] == "Successful" and crawledbutzero[1] == 0: fileInconsistent.write("`%s' had a successful crawl, but still has zero DOIs reported. Machines it occurred on: " % (arAuid[1],)) isEmpty = False cursorMachine.execute("SELECT machinename, port FROM burp where auid = '%s' AND aulastcrawlresult = 'Successful' AND numarticles = 0 AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: fileInconsistent.write("%s:%d " %(arMachineName[0], arMachineName[1])) arMachineName = cursorMachine.fetchone() fileInconsistent.write("\n") # Verify that the current article on one machine does not have fewer articles than any previous run. cursorMachine.execute("SELECT machinename, port FROM burp WHERE auid = '%s' AND rundate >= '%s' AND rundate <= '%s' GROUP BY machinename, port;" % (arAuid[0], str(options.reportdatestart), str(options.reportdateend))) arMachineName = cursorMachine.fetchone() while arMachineName is not None: cursor2.execute("SELECT numarticles FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d AND rundate >= '%s' AND rundate <= '%s';" % (arAuid[0], arMachineName[0], arMachineName[1], str(options.reportdatestart), str(options.reportdateend))) currentNumArticles = cursor2.fetchone() cursor3.execute("SELECT MAX(numarticles), rundate FROM burp WHERE auid = '%s' AND machinename = '%s' AND port = %d;" % (arAuid[0], arMachineName[0], arMachineName[1])) bestNumArticles = cursor3.fetchone() # This message should only output if we haven't already reported that it's zero. 
if currentNumArticles[0] < bestNumArticles[0] and currentNumArticles[0] > 0: fileInconsistent.write("`%s' (on %s:%d) has seen its number of articles decrease to %d (current run) from %d (on %s).\n" %(arAuid[1], arMachineName[0], arMachineName[1], currentNumArticles[0], bestNumArticles[0], bestNumArticles[1])) isEmpty = False arMachineName = cursorMachine.fetchone() arAuid = cursor.fetchone() # Report problems... print("Testing problems within report.\n") cursor.execute("SELECT publisher, auyear FROM burpreport group by publisher, auyear;") arPublisherYear = cursor.fetchone() while arPublisherYear is not None: cursor2.execute("SELECT MAX(numarticles), rundate FROM burpreport WHERE publisher = '%s' AND year = %d" % (arPublisherYear[0], arPublisherYear[1])) highestInYear = cursor2.fetchone() cursor3.execute("SELECT numarticles FROM burpreport WHERE publisher = '%s' AND year = %d ORDER BY rundate DESC" % (arPublisherYear[0], arPublisherYear[1])) mostRecent = cursor3.fetchone() if mostRecent[0] < highestInYear[0]: fileInconsistent.write("Publisher %s in year %d has seen its total number of articles decrease from %d (in report generated on %s) to %d.\n" % (arPublisherYear[0], arPublisherYear[1], highestInYear[0], highestInYear[1].strftime("%d-%b-%Y"), mostRecent)) isEmpty = False arPublisherYear = cursor.fetchone() if isEmpty: fileInconsistent.write("Congratulations: no AU has inconsistent data!") fileInconsistent.close() cursor3.close() cursor2.close() cursor.close()
|
print "startExecution = %s" % (startExecution, )
|
def _article_report(client, db, options): auids, auname = _get_auids(client) host, port = options.host.split(':',1) auyear = {} austatus = {} aucreated = {} aulastcrawlresult = {} aucontentsize = {} audisksize = {} aurepository = {} auarticles = {} cursor = db.cursor() cursorStarted = db.cursor() cursorStarted.execute("SELECT MAX(start) FROM executions"); arStarted = cursorStarted.fetchone() if arStarted is None: print "You must insert the start time before you do an article report.\n" raise RuntimeError("You must insert the start time before you do an article report.") startExecution = arStarted[0] print "startExecution = %s" % (startExecution, ) for auid in auids: # Because it's hard to know if the Burp is running without SOME feedback... print options.host + ":" + auname[auid] # Skip the AUID if it has been seen in this (overall) execution. cursor.execute("SELECT MAX(rundate) FROM burp WHERE machinename = '%s' AND port = %s AND auid = '%s'" % (host, port, auid)) arRunDate = cursor.fetchone() if (arRunDate is not None) and (arRunDate[0] > startExecution): print("Skipping: This AU was last recorded on %s, and the execution started on %s." % (time.strftime("%Y-%m-%d %H:%M:%S", arRunDate[0]), time.strftime("%Y-%m-%d %H:%M:%S", startExecution))) continue rerun = True numRuns = 0 while rerun: try: summary, table = client._getStatusTable('ArchivalUnitTable', auid) rerun = False except urllib2.URLError: numRuns += 1 print "%s : _article_report has a URL Error. This is try %d." % (options.host, numRuns) if numRuns > PARAM_REPEAT_GET_STATUS_TABLE: print "Giving up." 
raise auyear[auid] = summary.get('Year', 0) austatus[auid] = summary.get('Status') aulastcrawlresult[auid] = summary.get('Last Crawl Result', 'n/a') aucontentsize[auid] = summary.get('Content Size') if aucontentsize[auid] <> None: aucontentsize[auid].replace(",", "") else: aucontentsize[auid] = "" audisksize[auid] = summary.get('Disk Usage (MB)', 'n/a') aurepository[auid] = summary.get('Repository') created = summary.get('Created') if created is not None: try: aucreated[auid] = time.strptime(created, "%H:%M:%S %m/%d/%y") except ValueError: print "FAIL: 'Created' date was '%s', which is not the right format. Continuing.\n" % (created,) aucreated[auid] = None else: print "FAIL: created time was not set.\n" aucreated[auid] = None _get_list_articles(client, auid, auarticles) # Note: There is no article iterator for RSC. This is a work-around. if auid.find('ClockssRoyalSocietyOfChemistryPlugin') >= 0 and (options.host.find("ingest") >= 0): _get_list_urls(client, auid, auarticles) cursor.execute("""INSERT INTO burp(machinename, port, rundate,
|
|
if (arRunDate is not None) and (arRunDate[0] > startExecution): print("Skipping: This AU was last recorded on %s, and the execution started on %s." % (time.strftime("%Y-%m-%d %H:%M:%S", arRunDate[0]), time.strftime("%Y-%m-%d %H:%M:%S", startExecution)))
|
if (arRunDate is None) or (arRunDate[0] is None): arRunDate = [datetime.datetime(1900, 1, 1)] if (arRunDate[0] > startExecution): print("Skipping: This AU was last recorded on %s." % (arRunDate[0].strftime("%Y-%m-%d %H:%M:%S"),)) print("The execution started on %s." % (startExecution.strftime("%Y-%m-%d %H:%M:%S"),))
|
def _article_report(client, db, options): auids, auname = _get_auids(client) host, port = options.host.split(':',1) auyear = {} austatus = {} aucreated = {} aulastcrawlresult = {} aucontentsize = {} audisksize = {} aurepository = {} auarticles = {} cursor = db.cursor() cursorStarted = db.cursor() cursorStarted.execute("SELECT MAX(start) FROM executions"); arStarted = cursorStarted.fetchone() if arStarted is None: print "You must insert the start time before you do an article report.\n" raise RuntimeError("You must insert the start time before you do an article report.") startExecution = arStarted[0] print "startExecution = %s" % (startExecution, ) for auid in auids: # Because it's hard to know if the Burp is running without SOME feedback... print options.host + ":" + auname[auid] # Skip the AUID if it has been seen in this (overall) execution. cursor.execute("SELECT MAX(rundate) FROM burp WHERE machinename = '%s' AND port = %s AND auid = '%s'" % (host, port, auid)) arRunDate = cursor.fetchone() if (arRunDate is not None) and (arRunDate[0] > startExecution): print("Skipping: This AU was last recorded on %s, and the execution started on %s." % (time.strftime("%Y-%m-%d %H:%M:%S", arRunDate[0]), time.strftime("%Y-%m-%d %H:%M:%S", startExecution))) continue rerun = True numRuns = 0 while rerun: try: summary, table = client._getStatusTable('ArchivalUnitTable', auid) rerun = False except urllib2.URLError: numRuns += 1 print "%s : _article_report has a URL Error. This is try %d." % (options.host, numRuns) if numRuns > PARAM_REPEAT_GET_STATUS_TABLE: print "Giving up." 
raise auyear[auid] = summary.get('Year', 0) austatus[auid] = summary.get('Status') aulastcrawlresult[auid] = summary.get('Last Crawl Result', 'n/a') aucontentsize[auid] = summary.get('Content Size') if aucontentsize[auid] <> None: aucontentsize[auid].replace(",", "") else: aucontentsize[auid] = "" audisksize[auid] = summary.get('Disk Usage (MB)', 'n/a') aurepository[auid] = summary.get('Repository') created = summary.get('Created') if created is not None: try: aucreated[auid] = time.strptime(created, "%H:%M:%S %m/%d/%y") except ValueError: print "FAIL: 'Created' date was '%s', which is not the right format. Continuing.\n" % (created,) aucreated[auid] = None else: print "FAIL: created time was not set.\n" aucreated[auid] = None _get_list_articles(client, auid, auarticles) # Note: There is no article iterator for RSC. This is a work-around. if auid.find('ClockssRoyalSocietyOfChemistryPlugin') >= 0 and (options.host.find("ingest") >= 0): _get_list_urls(client, auid, auarticles) cursor.execute("""INSERT INTO burp(machinename, port, rundate,
|
total['rsp'][strYear] += articles[0]
|
total['rsp'] += articles[0]
|
def _main_procedure(): parser = _make_command_line_parser() (options, args) = parser.parse_args(values=parser.get_default_values()) _check_required_options(parser, options) db = MySQLdb.connect(host="localhost", user="edwardsb", passwd=options.dbpassword, db="burp") _update_required_options(db, options) # Initialize the hashes. # WARNING: If you update this list, you need to update three places: # 1. The list of publishers in the 'while auid is not None' loop. # 2. The summary report # 3. In BurpCheck.py, the _is_reported() method. publishers = ['aap', 'aip', 'ama', 'aps', 'acm', 'bep', 'bmc', 'cap', 'eup', 'elsevier', 'gtv', 'iop', 'wiley', 'lup', 'npg', 'oup', 'rup', 'rsc', 'rsp', 'sage', 'springer', 'ssr', 'tf'] total = {} pubyear = {} for publisher in publishers: pubyear[publisher] = {} total[publisher] = 0 for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) pubyear[publisher][strYear] = 0 cursorAuid = db.cursor() cursorAuid.execute("SELECT DISTINCT(auid) from burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' order by auid;") auid = cursorAuid.fetchone() while auid is not None: cursorArticles = db.cursor() cursorArticles.execute("SELECT MAX(numarticles), auyear FROM burp WHERE auid = \"" + auid[0] + "\" and rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) +"';") articles = cursorArticles.fetchone() strYear = articles[1] if "-" in strYear: strYear = strYear[5:9] # If you change this list, be sure to change the equivalent # list in BurpCheck.py. if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("aappublications") != -1): pubyear['aap'][strYear] += articles[0] total['aap'] += articles[0] # No criteria for American Institute of Physics. 
if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("ama-assn") != -1): pubyear['ama'][strYear] += articles[0] total['ama'] += articles[0] if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("physiology%2Eorg") != -1): pubyear['aps'][strYear] += articles[0] total['aps'] += articles[0] # No criteria for Association for Computing Machinery if (auid[0].find("ClockssBerkeleyElectronicPressPlugin") != -1): pubyear['bep'][strYear] += articles[0] total['bep'] += articles[0] # No criteria for BioMed Central. if (auid[0].find("ClockssCoActionPublishingPlugin") != -1): pubyear['cap'][strYear] += articles[0] total['cap'] += articles[0] if (auid[0].find("ClockssEdinburghUniversityPressPlugin") != -1): pubyear['eup'][strYear] += articles[0] total['eup'] += articles[0] # No criteria for Elsevier. if (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1): pubyear['gtv'][strYear] += articles[0] total['gtv'] += articles[0] # No criteria for IOP Publishing # No criteria for Wiley # No criteria for Liverpool if (auid[0].find("ClockssNaturePublishingGroupPlugin") != -1): pubyear['npg'][strYear] += articles[0] total['npg'] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("oxfordjournals") != -1): pubyear['oup'][strYear] += articles[0] total['oup'] += articles[0] # No criteria for Rockefeller University Press if (auid[0].find("ClockssRoyalSocietyOfChemistryPlugin") != -1): pubyear['rsc'][strYear] += articles[0] total['rsc'] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0): pubyear['rsp'][strYear] += articles[0] total['rsp'][strYear] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("sagepub") != -1): pubyear['sage'][strYear] += articles[0] total['sage'] += articles[0] # No criteria for Springer if (auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1): pubyear['ssr'][strYear] += articles[0] total['ssr'] += articles[0] # No criteria for Taylor & 
Francis auid = cursorAuid.fetchone() # Verify our numbers! for publisher in publishers: testTotal = 0 for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) testTotal += pubyear[publisher][strYear] if total[publisher] != testTotal: print "ERROR: Publisher " + publisher + " doesn't have the right total." # Post the report to the database for publisher in publishers: for year in range(options.currentyear, options.minimumyear - 1, -1) + [0]: strYear = str(year) cursorAuid.execute("INSERT INTO burpreport(rundate, publisher, auyear, numarticles) VALUES (NOW(), \"%s\", %d, %d);" % (publisher, year, pubyear[publisher][strYear])) # Output the main report. filename = open(options.filename, 'w') filename.write("Dates of ingest," + str(options.reportdatestart) + " - " + str(options.reportdateend) + "\n") filename.write("Date of report," + str(date.today()) + "\n") filename.write("year,") for publisher in publishers: filename.write(publisher.upper() + ",") # Yes, there's an extra comma at the end. Same with everything else. # If that's a problem, we can fix it. filename.write("\n"); for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) printYear = strYear if (year == options.currentyear): printYear = "Current Ingest " + strYear if (year == "0"): printYear = "Data entry in progress -- Year TBD" filename.write( printYear + "," ) for publisher in publishers: filename.write(str(pubyear[publisher][strYear]) + ",") filename.write("\n") filename.write("Back Ingest Total,") for publisher in publishers: filename.write(str(total[publisher]) + ",") filename.write("\n") # Output the new summary report. # WARNING: I don't have a simple way to keep track of names compared # against publishers. You need to update these in two places: the # list called 'publishers' and here. 
currentyear = str(options.currentyear) summary = open(options.summary, "w") summary.write("Official Publisher Name,Publisher ID, Total Ingest for " + str(currentyear) + ", Total Ingest For all time\n") _print_summary_line(summary, "American Academy of Pediatrics", "aap", currentyear, pubyear, total) _print_summary_line(summary, "American Institute of Physics", "aip", currentyear, pubyear, total) _print_summary_line(summary, "American Medical Association", "ama", currentyear, pubyear, total) _print_summary_line(summary, "American Physiological Society", "aps", currentyear, pubyear, total) _print_summary_line(summary, "Association for Computing Machinery", "acm", currentyear, pubyear, total) _print_summary_line(summary, "Berkeley Electronic Press", "bep", currentyear, pubyear, total) _print_summary_line(summary, "BioMed Central", "bmc", currentyear, pubyear, total) _print_summary_line(summary, "Co-Action Publishing", currentyear, pubyear, total) _print_summary_line(summary, "Edinburgh University Press", "eup", currentyear, pubyear, total) _print_summary_line(summary, "Elsevier", "elsevier", currentyear, pubyear, total) _print_summary_line(summary, "Georg Thieme Verlag", currentyear, pubyear, total) _print_summary_line(summary, "IOP Publishing", "iop", currentyear, pubyear, total) _print_summary_line(summary, "John Wiley and Sons", "wiley", currentyear, pubyear, total) _print_summary_line(summary, "Liverpool University Press", "lup", currentyear, pubyear, total) _print_summary_line(summary, "Nature Publishing Group", "npg", currentyear, pubyear, total) _print_summary_line(summary, "Oxford University Press", "oup", currentyear, pubyear, total) _print_summary_line(summary, "Rockefeller University Press", "rup", currentyear, pubyear, total) _print_summary_line(summary, "RSC Publishing", "rsc", currentyear, pubyear, total) _print_summary_line(summary, "Royal Society Publishing", "rs", currentyear, pubyear, total) _print_summary_line(summary, "SAGE Publications", "sage", 
currentyear, pubyear, total) _print_summary_line(summary, "Springer", "springer", currentyear, pubyear, total) _print_summary_line(summary, "Society for the Study of Reproduction", currentyear, pubyear, total) _print_summary_line(summary, "Taylor and Francis", "tf", currentyear, pubyear, total)
|
_print_summary_line(summary, "Co-Action Publishing", currentyear, pubyear, total)
|
_print_summary_line(summary, "Co-Action Publishing", "cap", currentyear, pubyear, total)
|
def _main_procedure(): parser = _make_command_line_parser() (options, args) = parser.parse_args(values=parser.get_default_values()) _check_required_options(parser, options) db = MySQLdb.connect(host="localhost", user="edwardsb", passwd=options.dbpassword, db="burp") _update_required_options(db, options) # Initialize the hashes. # WARNING: If you update this list, you need to update three places: # 1. The list of publishers in the 'while auid is not None' loop. # 2. The summary report # 3. In BurpCheck.py, the _is_reported() method. publishers = ['aap', 'aip', 'ama', 'aps', 'acm', 'bep', 'bmc', 'cap', 'eup', 'elsevier', 'gtv', 'iop', 'wiley', 'lup', 'npg', 'oup', 'rup', 'rsc', 'rsp', 'sage', 'springer', 'ssr', 'tf'] total = {} pubyear = {} for publisher in publishers: pubyear[publisher] = {} total[publisher] = 0 for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) pubyear[publisher][strYear] = 0 cursorAuid = db.cursor() cursorAuid.execute("SELECT DISTINCT(auid) from burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' order by auid;") auid = cursorAuid.fetchone() while auid is not None: cursorArticles = db.cursor() cursorArticles.execute("SELECT MAX(numarticles), auyear FROM burp WHERE auid = \"" + auid[0] + "\" and rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) +"';") articles = cursorArticles.fetchone() strYear = articles[1] if "-" in strYear: strYear = strYear[5:9] # If you change this list, be sure to change the equivalent # list in BurpCheck.py. if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("aappublications") != -1): pubyear['aap'][strYear] += articles[0] total['aap'] += articles[0] # No criteria for American Institute of Physics. 
if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("ama-assn") != -1): pubyear['ama'][strYear] += articles[0] total['ama'] += articles[0] if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("physiology%2Eorg") != -1): pubyear['aps'][strYear] += articles[0] total['aps'] += articles[0] # No criteria for Association for Computing Machinery if (auid[0].find("ClockssBerkeleyElectronicPressPlugin") != -1): pubyear['bep'][strYear] += articles[0] total['bep'] += articles[0] # No criteria for BioMed Central. if (auid[0].find("ClockssCoActionPublishingPlugin") != -1): pubyear['cap'][strYear] += articles[0] total['cap'] += articles[0] if (auid[0].find("ClockssEdinburghUniversityPressPlugin") != -1): pubyear['eup'][strYear] += articles[0] total['eup'] += articles[0] # No criteria for Elsevier. if (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1): pubyear['gtv'][strYear] += articles[0] total['gtv'] += articles[0] # No criteria for IOP Publishing # No criteria for Wiley # No criteria for Liverpool if (auid[0].find("ClockssNaturePublishingGroupPlugin") != -1): pubyear['npg'][strYear] += articles[0] total['npg'] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("oxfordjournals") != -1): pubyear['oup'][strYear] += articles[0] total['oup'] += articles[0] # No criteria for Rockefeller University Press if (auid[0].find("ClockssRoyalSocietyOfChemistryPlugin") != -1): pubyear['rsc'][strYear] += articles[0] total['rsc'] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0): pubyear['rsp'][strYear] += articles[0] total['rsp'][strYear] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("sagepub") != -1): pubyear['sage'][strYear] += articles[0] total['sage'] += articles[0] # No criteria for Springer if (auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1): pubyear['ssr'][strYear] += articles[0] total['ssr'] += articles[0] # No criteria for Taylor & 
Francis auid = cursorAuid.fetchone() # Verify our numbers! for publisher in publishers: testTotal = 0 for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) testTotal += pubyear[publisher][strYear] if total[publisher] != testTotal: print "ERROR: Publisher " + publisher + " doesn't have the right total." # Post the report to the database for publisher in publishers: for year in range(options.currentyear, options.minimumyear - 1, -1) + [0]: strYear = str(year) cursorAuid.execute("INSERT INTO burpreport(rundate, publisher, auyear, numarticles) VALUES (NOW(), \"%s\", %d, %d);" % (publisher, year, pubyear[publisher][strYear])) # Output the main report. filename = open(options.filename, 'w') filename.write("Dates of ingest," + str(options.reportdatestart) + " - " + str(options.reportdateend) + "\n") filename.write("Date of report," + str(date.today()) + "\n") filename.write("year,") for publisher in publishers: filename.write(publisher.upper() + ",") # Yes, there's an extra comma at the end. Same with everything else. # If that's a problem, we can fix it. filename.write("\n"); for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) printYear = strYear if (year == options.currentyear): printYear = "Current Ingest " + strYear if (year == "0"): printYear = "Data entry in progress -- Year TBD" filename.write( printYear + "," ) for publisher in publishers: filename.write(str(pubyear[publisher][strYear]) + ",") filename.write("\n") filename.write("Back Ingest Total,") for publisher in publishers: filename.write(str(total[publisher]) + ",") filename.write("\n") # Output the new summary report. # WARNING: I don't have a simple way to keep track of names compared # against publishers. You need to update these in two places: the # list called 'publishers' and here. 
currentyear = str(options.currentyear) summary = open(options.summary, "w") summary.write("Official Publisher Name,Publisher ID, Total Ingest for " + str(currentyear) + ", Total Ingest For all time\n") _print_summary_line(summary, "American Academy of Pediatrics", "aap", currentyear, pubyear, total) _print_summary_line(summary, "American Institute of Physics", "aip", currentyear, pubyear, total) _print_summary_line(summary, "American Medical Association", "ama", currentyear, pubyear, total) _print_summary_line(summary, "American Physiological Society", "aps", currentyear, pubyear, total) _print_summary_line(summary, "Association for Computing Machinery", "acm", currentyear, pubyear, total) _print_summary_line(summary, "Berkeley Electronic Press", "bep", currentyear, pubyear, total) _print_summary_line(summary, "BioMed Central", "bmc", currentyear, pubyear, total) _print_summary_line(summary, "Co-Action Publishing", currentyear, pubyear, total) _print_summary_line(summary, "Edinburgh University Press", "eup", currentyear, pubyear, total) _print_summary_line(summary, "Elsevier", "elsevier", currentyear, pubyear, total) _print_summary_line(summary, "Georg Thieme Verlag", currentyear, pubyear, total) _print_summary_line(summary, "IOP Publishing", "iop", currentyear, pubyear, total) _print_summary_line(summary, "John Wiley and Sons", "wiley", currentyear, pubyear, total) _print_summary_line(summary, "Liverpool University Press", "lup", currentyear, pubyear, total) _print_summary_line(summary, "Nature Publishing Group", "npg", currentyear, pubyear, total) _print_summary_line(summary, "Oxford University Press", "oup", currentyear, pubyear, total) _print_summary_line(summary, "Rockefeller University Press", "rup", currentyear, pubyear, total) _print_summary_line(summary, "RSC Publishing", "rsc", currentyear, pubyear, total) _print_summary_line(summary, "Royal Society Publishing", "rs", currentyear, pubyear, total) _print_summary_line(summary, "SAGE Publications", "sage", 
currentyear, pubyear, total) _print_summary_line(summary, "Springer", "springer", currentyear, pubyear, total) _print_summary_line(summary, "Society for the Study of Reproduction", currentyear, pubyear, total) _print_summary_line(summary, "Taylor and Francis", "tf", currentyear, pubyear, total)
|
_print_summary_line(summary, "Georg Thieme Verlag", currentyear, pubyear, total)
|
_print_summary_line(summary, "Georg Thieme Verlag", "gtv", currentyear, pubyear, total)
|
def _main_procedure(): parser = _make_command_line_parser() (options, args) = parser.parse_args(values=parser.get_default_values()) _check_required_options(parser, options) db = MySQLdb.connect(host="localhost", user="edwardsb", passwd=options.dbpassword, db="burp") _update_required_options(db, options) # Initialize the hashes. # WARNING: If you update this list, you need to update three places: # 1. The list of publishers in the 'while auid is not None' loop. # 2. The summary report # 3. In BurpCheck.py, the _is_reported() method. publishers = ['aap', 'aip', 'ama', 'aps', 'acm', 'bep', 'bmc', 'cap', 'eup', 'elsevier', 'gtv', 'iop', 'wiley', 'lup', 'npg', 'oup', 'rup', 'rsc', 'rsp', 'sage', 'springer', 'ssr', 'tf'] total = {} pubyear = {} for publisher in publishers: pubyear[publisher] = {} total[publisher] = 0 for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) pubyear[publisher][strYear] = 0 cursorAuid = db.cursor() cursorAuid.execute("SELECT DISTINCT(auid) from burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' order by auid;") auid = cursorAuid.fetchone() while auid is not None: cursorArticles = db.cursor() cursorArticles.execute("SELECT MAX(numarticles), auyear FROM burp WHERE auid = \"" + auid[0] + "\" and rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) +"';") articles = cursorArticles.fetchone() strYear = articles[1] if "-" in strYear: strYear = strYear[5:9] # If you change this list, be sure to change the equivalent # list in BurpCheck.py. if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("aappublications") != -1): pubyear['aap'][strYear] += articles[0] total['aap'] += articles[0] # No criteria for American Institute of Physics. 
if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("ama-assn") != -1): pubyear['ama'][strYear] += articles[0] total['ama'] += articles[0] if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("physiology%2Eorg") != -1): pubyear['aps'][strYear] += articles[0] total['aps'] += articles[0] # No criteria for Association for Computing Machinery if (auid[0].find("ClockssBerkeleyElectronicPressPlugin") != -1): pubyear['bep'][strYear] += articles[0] total['bep'] += articles[0] # No criteria for BioMed Central. if (auid[0].find("ClockssCoActionPublishingPlugin") != -1): pubyear['cap'][strYear] += articles[0] total['cap'] += articles[0] if (auid[0].find("ClockssEdinburghUniversityPressPlugin") != -1): pubyear['eup'][strYear] += articles[0] total['eup'] += articles[0] # No criteria for Elsevier. if (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1): pubyear['gtv'][strYear] += articles[0] total['gtv'] += articles[0] # No criteria for IOP Publishing # No criteria for Wiley # No criteria for Liverpool if (auid[0].find("ClockssNaturePublishingGroupPlugin") != -1): pubyear['npg'][strYear] += articles[0] total['npg'] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("oxfordjournals") != -1): pubyear['oup'][strYear] += articles[0] total['oup'] += articles[0] # No criteria for Rockefeller University Press if (auid[0].find("ClockssRoyalSocietyOfChemistryPlugin") != -1): pubyear['rsc'][strYear] += articles[0] total['rsc'] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0): pubyear['rsp'][strYear] += articles[0] total['rsp'][strYear] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("sagepub") != -1): pubyear['sage'][strYear] += articles[0] total['sage'] += articles[0] # No criteria for Springer if (auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1): pubyear['ssr'][strYear] += articles[0] total['ssr'] += articles[0] # No criteria for Taylor & 
Francis auid = cursorAuid.fetchone() # Verify our numbers! for publisher in publishers: testTotal = 0 for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) testTotal += pubyear[publisher][strYear] if total[publisher] != testTotal: print "ERROR: Publisher " + publisher + " doesn't have the right total." # Post the report to the database for publisher in publishers: for year in range(options.currentyear, options.minimumyear - 1, -1) + [0]: strYear = str(year) cursorAuid.execute("INSERT INTO burpreport(rundate, publisher, auyear, numarticles) VALUES (NOW(), \"%s\", %d, %d);" % (publisher, year, pubyear[publisher][strYear])) # Output the main report. filename = open(options.filename, 'w') filename.write("Dates of ingest," + str(options.reportdatestart) + " - " + str(options.reportdateend) + "\n") filename.write("Date of report," + str(date.today()) + "\n") filename.write("year,") for publisher in publishers: filename.write(publisher.upper() + ",") # Yes, there's an extra comma at the end. Same with everything else. # If that's a problem, we can fix it. filename.write("\n"); for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) printYear = strYear if (year == options.currentyear): printYear = "Current Ingest " + strYear if (year == "0"): printYear = "Data entry in progress -- Year TBD" filename.write( printYear + "," ) for publisher in publishers: filename.write(str(pubyear[publisher][strYear]) + ",") filename.write("\n") filename.write("Back Ingest Total,") for publisher in publishers: filename.write(str(total[publisher]) + ",") filename.write("\n") # Output the new summary report. # WARNING: I don't have a simple way to keep track of names compared # against publishers. You need to update these in two places: the # list called 'publishers' and here. 
currentyear = str(options.currentyear) summary = open(options.summary, "w") summary.write("Official Publisher Name,Publisher ID, Total Ingest for " + str(currentyear) + ", Total Ingest For all time\n") _print_summary_line(summary, "American Academy of Pediatrics", "aap", currentyear, pubyear, total) _print_summary_line(summary, "American Institute of Physics", "aip", currentyear, pubyear, total) _print_summary_line(summary, "American Medical Association", "ama", currentyear, pubyear, total) _print_summary_line(summary, "American Physiological Society", "aps", currentyear, pubyear, total) _print_summary_line(summary, "Association for Computing Machinery", "acm", currentyear, pubyear, total) _print_summary_line(summary, "Berkeley Electronic Press", "bep", currentyear, pubyear, total) _print_summary_line(summary, "BioMed Central", "bmc", currentyear, pubyear, total) _print_summary_line(summary, "Co-Action Publishing", currentyear, pubyear, total) _print_summary_line(summary, "Edinburgh University Press", "eup", currentyear, pubyear, total) _print_summary_line(summary, "Elsevier", "elsevier", currentyear, pubyear, total) _print_summary_line(summary, "Georg Thieme Verlag", currentyear, pubyear, total) _print_summary_line(summary, "IOP Publishing", "iop", currentyear, pubyear, total) _print_summary_line(summary, "John Wiley and Sons", "wiley", currentyear, pubyear, total) _print_summary_line(summary, "Liverpool University Press", "lup", currentyear, pubyear, total) _print_summary_line(summary, "Nature Publishing Group", "npg", currentyear, pubyear, total) _print_summary_line(summary, "Oxford University Press", "oup", currentyear, pubyear, total) _print_summary_line(summary, "Rockefeller University Press", "rup", currentyear, pubyear, total) _print_summary_line(summary, "RSC Publishing", "rsc", currentyear, pubyear, total) _print_summary_line(summary, "Royal Society Publishing", "rs", currentyear, pubyear, total) _print_summary_line(summary, "SAGE Publications", "sage", 
currentyear, pubyear, total) _print_summary_line(summary, "Springer", "springer", currentyear, pubyear, total) _print_summary_line(summary, "Society for the Study of Reproduction", currentyear, pubyear, total) _print_summary_line(summary, "Taylor and Francis", "tf", currentyear, pubyear, total)
|
_print_summary_line(summary, "Royal Society Publishing", "rs", currentyear, pubyear, total)
|
_print_summary_line(summary, "Royal Society Publishing", "rsp", currentyear, pubyear, total)
|
def _main_procedure(): parser = _make_command_line_parser() (options, args) = parser.parse_args(values=parser.get_default_values()) _check_required_options(parser, options) db = MySQLdb.connect(host="localhost", user="edwardsb", passwd=options.dbpassword, db="burp") _update_required_options(db, options) # Initialize the hashes. # WARNING: If you update this list, you need to update three places: # 1. The list of publishers in the 'while auid is not None' loop. # 2. The summary report # 3. In BurpCheck.py, the _is_reported() method. publishers = ['aap', 'aip', 'ama', 'aps', 'acm', 'bep', 'bmc', 'cap', 'eup', 'elsevier', 'gtv', 'iop', 'wiley', 'lup', 'npg', 'oup', 'rup', 'rsc', 'rsp', 'sage', 'springer', 'ssr', 'tf'] total = {} pubyear = {} for publisher in publishers: pubyear[publisher] = {} total[publisher] = 0 for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) pubyear[publisher][strYear] = 0 cursorAuid = db.cursor() cursorAuid.execute("SELECT DISTINCT(auid) from burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' order by auid;") auid = cursorAuid.fetchone() while auid is not None: cursorArticles = db.cursor() cursorArticles.execute("SELECT MAX(numarticles), auyear FROM burp WHERE auid = \"" + auid[0] + "\" and rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) +"';") articles = cursorArticles.fetchone() strYear = articles[1] if "-" in strYear: strYear = strYear[5:9] # If you change this list, be sure to change the equivalent # list in BurpCheck.py. if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("aappublications") != -1): pubyear['aap'][strYear] += articles[0] total['aap'] += articles[0] # No criteria for American Institute of Physics. 
if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("ama-assn") != -1): pubyear['ama'][strYear] += articles[0] total['ama'] += articles[0] if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("physiology%2Eorg") != -1): pubyear['aps'][strYear] += articles[0] total['aps'] += articles[0] # No criteria for Association for Computing Machinery if (auid[0].find("ClockssBerkeleyElectronicPressPlugin") != -1): pubyear['bep'][strYear] += articles[0] total['bep'] += articles[0] # No criteria for BioMed Central. if (auid[0].find("ClockssCoActionPublishingPlugin") != -1): pubyear['cap'][strYear] += articles[0] total['cap'] += articles[0] if (auid[0].find("ClockssEdinburghUniversityPressPlugin") != -1): pubyear['eup'][strYear] += articles[0] total['eup'] += articles[0] # No criteria for Elsevier. if (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1): pubyear['gtv'][strYear] += articles[0] total['gtv'] += articles[0] # No criteria for IOP Publishing # No criteria for Wiley # No criteria for Liverpool if (auid[0].find("ClockssNaturePublishingGroupPlugin") != -1): pubyear['npg'][strYear] += articles[0] total['npg'] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("oxfordjournals") != -1): pubyear['oup'][strYear] += articles[0] total['oup'] += articles[0] # No criteria for Rockefeller University Press if (auid[0].find("ClockssRoyalSocietyOfChemistryPlugin") != -1): pubyear['rsc'][strYear] += articles[0] total['rsc'] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0): pubyear['rsp'][strYear] += articles[0] total['rsp'][strYear] += articles[0] if (auid[0].find("HighWire") != -1) and (auid[0].find("sagepub") != -1): pubyear['sage'][strYear] += articles[0] total['sage'] += articles[0] # No criteria for Springer if (auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1): pubyear['ssr'][strYear] += articles[0] total['ssr'] += articles[0] # No criteria for Taylor & 
Francis auid = cursorAuid.fetchone() # Verify our numbers! for publisher in publishers: testTotal = 0 for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) testTotal += pubyear[publisher][strYear] if total[publisher] != testTotal: print "ERROR: Publisher " + publisher + " doesn't have the right total." # Post the report to the database for publisher in publishers: for year in range(options.currentyear, options.minimumyear - 1, -1) + [0]: strYear = str(year) cursorAuid.execute("INSERT INTO burpreport(rundate, publisher, auyear, numarticles) VALUES (NOW(), \"%s\", %d, %d);" % (publisher, year, pubyear[publisher][strYear])) # Output the main report. filename = open(options.filename, 'w') filename.write("Dates of ingest," + str(options.reportdatestart) + " - " + str(options.reportdateend) + "\n") filename.write("Date of report," + str(date.today()) + "\n") filename.write("year,") for publisher in publishers: filename.write(publisher.upper() + ",") # Yes, there's an extra comma at the end. Same with everything else. # If that's a problem, we can fix it. filename.write("\n"); for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]: strYear = str(year) printYear = strYear if (year == options.currentyear): printYear = "Current Ingest " + strYear if (year == "0"): printYear = "Data entry in progress -- Year TBD" filename.write( printYear + "," ) for publisher in publishers: filename.write(str(pubyear[publisher][strYear]) + ",") filename.write("\n") filename.write("Back Ingest Total,") for publisher in publishers: filename.write(str(total[publisher]) + ",") filename.write("\n") # Output the new summary report. # WARNING: I don't have a simple way to keep track of names compared # against publishers. You need to update these in two places: the # list called 'publishers' and here. 
currentyear = str(options.currentyear) summary = open(options.summary, "w") summary.write("Official Publisher Name,Publisher ID, Total Ingest for " + str(currentyear) + ", Total Ingest For all time\n") _print_summary_line(summary, "American Academy of Pediatrics", "aap", currentyear, pubyear, total) _print_summary_line(summary, "American Institute of Physics", "aip", currentyear, pubyear, total) _print_summary_line(summary, "American Medical Association", "ama", currentyear, pubyear, total) _print_summary_line(summary, "American Physiological Society", "aps", currentyear, pubyear, total) _print_summary_line(summary, "Association for Computing Machinery", "acm", currentyear, pubyear, total) _print_summary_line(summary, "Berkeley Electronic Press", "bep", currentyear, pubyear, total) _print_summary_line(summary, "BioMed Central", "bmc", currentyear, pubyear, total) _print_summary_line(summary, "Co-Action Publishing", currentyear, pubyear, total) _print_summary_line(summary, "Edinburgh University Press", "eup", currentyear, pubyear, total) _print_summary_line(summary, "Elsevier", "elsevier", currentyear, pubyear, total) _print_summary_line(summary, "Georg Thieme Verlag", currentyear, pubyear, total) _print_summary_line(summary, "IOP Publishing", "iop", currentyear, pubyear, total) _print_summary_line(summary, "John Wiley and Sons", "wiley", currentyear, pubyear, total) _print_summary_line(summary, "Liverpool University Press", "lup", currentyear, pubyear, total) _print_summary_line(summary, "Nature Publishing Group", "npg", currentyear, pubyear, total) _print_summary_line(summary, "Oxford University Press", "oup", currentyear, pubyear, total) _print_summary_line(summary, "Rockefeller University Press", "rup", currentyear, pubyear, total) _print_summary_line(summary, "RSC Publishing", "rsc", currentyear, pubyear, total) _print_summary_line(summary, "Royal Society Publishing", "rs", currentyear, pubyear, total) _print_summary_line(summary, "SAGE Publications", "sage", 
currentyear, pubyear, total) _print_summary_line(summary, "Springer", "springer", currentyear, pubyear, total) _print_summary_line(summary, "Society for the Study of Reproduction", currentyear, pubyear, total) _print_summary_line(summary, "Taylor and Francis", "tf", currentyear, pubyear, total)
|
_print_summary_line(summary, "Society for the Study of Reproduction", currentyear, pubyear, total)
|
_print_summary_line(summary, "Society for the Study of Reproduction", "ssr", currentyear, pubyear, total)
|
def _main_procedure():
    """Tally per-publisher, per-year article counts from the 'burp' table,
    verify them, post them to 'burpreport', and write two CSV reports."""
    parser = _make_command_line_parser()
    (options, args) = parser.parse_args(values=parser.get_default_values())
    _check_required_options(parser, options)
    # NOTE(review): host/user/db are hard-coded; only the password comes
    # from the command line.
    db = MySQLdb.connect(host="localhost", user="edwardsb", passwd=options.dbpassword, db="burp")
    _update_required_options(db, options)
    # Initialize the hashes.
    # WARNING: If you update this list, you need to update three places:
    # 1. The list of publishers in the 'while auid is not None' loop.
    # 2. The summary report
    # 3. In BurpCheck.py, the _is_reported() method.
    publishers = ['aap', 'aip', 'ama', 'aps', 'acm', 'bep', 'bmc', 'cap', 'eup', 'elsevier', 'gtv', 'iop', 'wiley', 'lup', 'npg', 'oup', 'rup', 'rsc', 'rsp', 'sage', 'springer', 'ssr', 'tf']
    # total: publisher id -> all-time count.
    total = {}
    # pubyear: publisher id -> {year string -> count}; "0" is the bucket
    # for AUs whose year is still being entered.
    pubyear = {}
    for publisher in publishers:
        pubyear[publisher] = {}
        total[publisher] = 0
        for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]:
            strYear = str(year)
            pubyear[publisher][strYear] = 0
    cursorAuid = db.cursor()
    # NOTE(review): SQL built by string concatenation — consider
    # parameterized queries.
    cursorAuid.execute("SELECT DISTINCT(auid) from burp WHERE rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) + "' order by auid;")
    auid = cursorAuid.fetchone()
    while auid is not None:
        cursorArticles = db.cursor()
        cursorArticles.execute("SELECT MAX(numarticles), auyear FROM burp WHERE auid = \"" + auid[0] + "\" and rundate >= '" + str(options.reportdatestart) + "' AND rundate <= '" + str(options.reportdateend) +"';")
        articles = cursorArticles.fetchone()
        strYear = articles[1]
        # Year ranges like "2009-2010" are credited to the second year.
        if "-" in strYear:
            strYear = strYear[5:9]
        # If you change this list, be sure to change the equivalent
        # list in BurpCheck.py.
        if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("aappublications") != -1):
            pubyear['aap'][strYear] += articles[0]
            total['aap'] += articles[0]
        # No criteria for American Institute of Physics.
        if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("ama-assn") != -1):
            pubyear['ama'][strYear] += articles[0]
            total['ama'] += articles[0]
        if (auid[0].find("ClockssHighWirePlugin") != -1) and (auid[0].find("physiology%2Eorg") != -1):
            pubyear['aps'][strYear] += articles[0]
            total['aps'] += articles[0]
        # No criteria for Association for Computing Machinery
        if (auid[0].find("ClockssBerkeleyElectronicPressPlugin") != -1):
            pubyear['bep'][strYear] += articles[0]
            total['bep'] += articles[0]
        # No criteria for BioMed Central.
        if (auid[0].find("ClockssCoActionPublishingPlugin") != -1):
            pubyear['cap'][strYear] += articles[0]
            total['cap'] += articles[0]
        if (auid[0].find("ClockssEdinburghUniversityPressPlugin") != -1):
            pubyear['eup'][strYear] += articles[0]
            total['eup'] += articles[0]
        # No criteria for Elsevier.
        if (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1):
            pubyear['gtv'][strYear] += articles[0]
            total['gtv'] += articles[0]
        # No criteria for IOP Publishing
        # No criteria for Wiley
        # No criteria for Liverpool
        if (auid[0].find("ClockssNaturePublishingGroupPlugin") != -1):
            pubyear['npg'][strYear] += articles[0]
            total['npg'] += articles[0]
        if (auid[0].find("HighWire") != -1) and (auid[0].find("oxfordjournals") != -1):
            pubyear['oup'][strYear] += articles[0]
            total['oup'] += articles[0]
        # No criteria for Rockefeller University Press
        if (auid[0].find("ClockssRoyalSocietyOfChemistryPlugin") != -1):
            pubyear['rsc'][strYear] += articles[0]
            total['rsc'] += articles[0]
        # NOTE(review): 'find(...) != 0' looks wrong — str.find returns -1
        # when absent, so this matches nearly every HighWire AU; probably
        # should be '!= -1'.
        if (auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0):
            pubyear['rsp'][strYear] += articles[0]
            # NOTE(review): total values are ints — subscripting with
            # strYear raises TypeError; probably should be total['rsp'].
            total['rsp'][strYear] += articles[0]
        if (auid[0].find("HighWire") != -1) and (auid[0].find("sagepub") != -1):
            pubyear['sage'][strYear] += articles[0]
            total['sage'] += articles[0]
        # No criteria for Springer
        if (auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1):
            pubyear['ssr'][strYear] += articles[0]
            total['ssr'] += articles[0]
        # No criteria for Taylor & Francis
        auid = cursorAuid.fetchone()
    # Verify our numbers!
    for publisher in publishers:
        testTotal = 0
        for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]:
            strYear = str(year)
            testTotal += pubyear[publisher][strYear]
        if total[publisher] != testTotal:
            print "ERROR: Publisher " + publisher + " doesn't have the right total."
    # Post the report to the database
    for publisher in publishers:
        for year in range(options.currentyear, options.minimumyear - 1, -1) + [0]:
            strYear = str(year)
            cursorAuid.execute("INSERT INTO burpreport(rundate, publisher, auyear, numarticles) VALUES (NOW(), \"%s\", %d, %d);" % (publisher, year, pubyear[publisher][strYear]))
    # Output the main report.
    filename = open(options.filename, 'w')
    filename.write("Dates of ingest," + str(options.reportdatestart) + " - " + str(options.reportdateend) + "\n")
    filename.write("Date of report," + str(date.today()) + "\n")
    filename.write("year,")
    for publisher in publishers:
        filename.write(publisher.upper() + ",")
    # Yes, there's an extra comma at the end. Same with everything else.
    # If that's a problem, we can fix it.
    filename.write("\n");
    for year in range(options.currentyear, options.minimumyear - 1, -1) + ["0"]:
        strYear = str(year)
        printYear = strYear
        if (year == options.currentyear):
            printYear = "Current Ingest " + strYear
        if (year == "0"):
            printYear = "Data entry in progress -- Year TBD"
        filename.write( printYear + "," )
        for publisher in publishers:
            filename.write(str(pubyear[publisher][strYear]) + ",")
        filename.write("\n")
    filename.write("Back Ingest Total,")
    for publisher in publishers:
        filename.write(str(total[publisher]) + ",")
    filename.write("\n")
    # Output the new summary report.
    # WARNING: I don't have a simple way to keep track of names compared
    # against publishers. You need to update these in two places: the
    # list called 'publishers' and here.
    currentyear = str(options.currentyear)
    summary = open(options.summary, "w")
    summary.write("Official Publisher Name,Publisher ID, Total Ingest for " + str(currentyear) + ", Total Ingest For all time\n")
    _print_summary_line(summary, "American Academy of Pediatrics", "aap", currentyear, pubyear, total)
    _print_summary_line(summary, "American Institute of Physics", "aip", currentyear, pubyear, total)
    _print_summary_line(summary, "American Medical Association", "ama", currentyear, pubyear, total)
    _print_summary_line(summary, "American Physiological Society", "aps", currentyear, pubyear, total)
    _print_summary_line(summary, "Association for Computing Machinery", "acm", currentyear, pubyear, total)
    _print_summary_line(summary, "Berkeley Electronic Press", "bep", currentyear, pubyear, total)
    _print_summary_line(summary, "BioMed Central", "bmc", currentyear, pubyear, total)
    # NOTE(review): the publisher-id argument appears to be missing here
    # (expected "cap") — the call has one argument fewer than the others.
    _print_summary_line(summary, "Co-Action Publishing", currentyear, pubyear, total)
    _print_summary_line(summary, "Edinburgh University Press", "eup", currentyear, pubyear, total)
    _print_summary_line(summary, "Elsevier", "elsevier", currentyear, pubyear, total)
    # NOTE(review): publisher-id argument missing (expected "gtv").
    _print_summary_line(summary, "Georg Thieme Verlag", currentyear, pubyear, total)
    _print_summary_line(summary, "IOP Publishing", "iop", currentyear, pubyear, total)
    _print_summary_line(summary, "John Wiley and Sons", "wiley", currentyear, pubyear, total)
    _print_summary_line(summary, "Liverpool University Press", "lup", currentyear, pubyear, total)
    _print_summary_line(summary, "Nature Publishing Group", "npg", currentyear, pubyear, total)
    _print_summary_line(summary, "Oxford University Press", "oup", currentyear, pubyear, total)
    _print_summary_line(summary, "Rockefeller University Press", "rup", currentyear, pubyear, total)
    _print_summary_line(summary, "RSC Publishing", "rsc", currentyear, pubyear, total)
    # NOTE(review): id "rs" is not in 'publishers'; probably "rsp".
    _print_summary_line(summary, "Royal Society Publishing", "rs", currentyear, pubyear, total)
    _print_summary_line(summary, "SAGE Publications", "sage", currentyear, pubyear, total)
    _print_summary_line(summary, "Springer", "springer", currentyear, pubyear, total)
    # NOTE(review): publisher-id argument missing (expected "ssr").
    _print_summary_line(summary, "Society for the Study of Reproduction", currentyear, pubyear, total)
    _print_summary_line(summary, "Taylor and Francis", "tf", currentyear, pubyear, total)
|
callback=__synonym_csv)
|
callback=__synonym_list)
|
def __synonym_list(opt, str, val, par): if getattr(par.values, TdboutConstants.OPTION_STYLE, None): par.error('cannot specify -%s and -%s together' % (TdboutConstants.OPTION_LIST_SHORT, TdboutConstants.OPTION_STYLE_SHORT)) setattr(par.values, TdboutConstants.OPTION_STYLE, TdboutConstants.OPTION_STYLE_LIST) setattr(par.values, TdboutConstants.OPTION_FIELDS, val)
|
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
def db_len(uistr, maxlen): if uistr is None: return None if len(uistr) > maxlen: __log('Warning: String is over %d characters long: %s' % (maxlen, uistr), options) return uistr[0:maxlen]
|
(auid.find("ClockssBerkeleyElectronicPressPlugin") != -1) or
|
def _is_reported(auid): # If you change this list, be sure to change the equivalent list # in BurpReport.py. return ( ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("aappublications") != -1)) or ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("ama-assn") != -1)) or ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("physiology%2Eorg") != -1)) or ((auid.find("ClockssBerkeleyElectronicPressPlugin") != -1)) or (auid.find("ClockssBerkeleyElectronicPressPlugin") != -1) or (auid.find("ClockssCoActionPublishingPlugin") != -1) or (auid.find("ClockssEdinburghUniversityPressPlugin") != -1) or (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1) or ((auid.find("ClockssNaturePublishingGroupPlugin") != -1)) or ((auid.find("HighWire") != -1) and (auid.find("oxfordjournals") != -1)) or ((auid.find("ClockssRoyalSocietyOfChemistryPlugin") != -1)) or ((auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0)) or ((auid.find("HighWire") != -1) and (auid.find("sagepub") != -1)) or ((auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1)) )
|
|
(auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1) or
|
(auid.find("ClockssGeorgThiemeVerlagPlugin") != -1) or
|
def _is_reported(auid): # If you change this list, be sure to change the equivalent list # in BurpReport.py. return ( ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("aappublications") != -1)) or ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("ama-assn") != -1)) or ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("physiology%2Eorg") != -1)) or ((auid.find("ClockssBerkeleyElectronicPressPlugin") != -1)) or (auid.find("ClockssBerkeleyElectronicPressPlugin") != -1) or (auid.find("ClockssCoActionPublishingPlugin") != -1) or (auid.find("ClockssEdinburghUniversityPressPlugin") != -1) or (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1) or ((auid.find("ClockssNaturePublishingGroupPlugin") != -1)) or ((auid.find("HighWire") != -1) and (auid.find("oxfordjournals") != -1)) or ((auid.find("ClockssRoyalSocietyOfChemistryPlugin") != -1)) or ((auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0)) or ((auid.find("HighWire") != -1) and (auid.find("sagepub") != -1)) or ((auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1)) )
|
((auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0)) or
|
((auid.find("HighWire") != -1) and (auid.find("royalsocietypublishing") != 0)) or
|
def _is_reported(auid): # If you change this list, be sure to change the equivalent list # in BurpReport.py. return ( ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("aappublications") != -1)) or ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("ama-assn") != -1)) or ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("physiology%2Eorg") != -1)) or ((auid.find("ClockssBerkeleyElectronicPressPlugin") != -1)) or (auid.find("ClockssBerkeleyElectronicPressPlugin") != -1) or (auid.find("ClockssCoActionPublishingPlugin") != -1) or (auid.find("ClockssEdinburghUniversityPressPlugin") != -1) or (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1) or ((auid.find("ClockssNaturePublishingGroupPlugin") != -1)) or ((auid.find("HighWire") != -1) and (auid.find("oxfordjournals") != -1)) or ((auid.find("ClockssRoyalSocietyOfChemistryPlugin") != -1)) or ((auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0)) or ((auid.find("HighWire") != -1) and (auid.find("sagepub") != -1)) or ((auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1)) )
|
((auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1))
|
((auid.find("HighWire") != -1) and (auid.find("biolreprod%2Eorg") != -1))
|
def _is_reported(auid): # If you change this list, be sure to change the equivalent list # in BurpReport.py. return ( ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("aappublications") != -1)) or ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("ama-assn") != -1)) or ((auid.find("ClockssHighWirePlugin") != -1) and (auid.find("physiology%2Eorg") != -1)) or ((auid.find("ClockssBerkeleyElectronicPressPlugin") != -1)) or (auid.find("ClockssBerkeleyElectronicPressPlugin") != -1) or (auid.find("ClockssCoActionPublishingPlugin") != -1) or (auid.find("ClockssEdinburghUniversityPressPlugin") != -1) or (auid[0].find("ClockssGeorgThiemeVerlagPlugin") != -1) or ((auid.find("ClockssNaturePublishingGroupPlugin") != -1)) or ((auid.find("HighWire") != -1) and (auid.find("oxfordjournals") != -1)) or ((auid.find("ClockssRoyalSocietyOfChemistryPlugin") != -1)) or ((auid[0].find("HighWire") != -1) and (auid[0].find("royalsocietypublishing") != 0)) or ((auid.find("HighWire") != -1) and (auid.find("sagepub") != -1)) or ((auid[0].find("HighWire") != -1) and (auid[0].find("biolreprod%2Eorg") != -1)) )
|
statuses = TdbqPredicate(lambda au: au.status() in [AU.STATUS_RELEASED, AU.STATUS_DOWN, AU.STATUS_SUPERSEDED, AU.STATUS_RETRACTED])
|
statuses = TdbqPredicate(lambda au: au.status() in [AU.Status.RELEASED, AU.Status.DOWN, AU.Status.SUPERSEDED, AU.Status.RETRACTED])
|
def tdbq_reprocess(tdb, options): '''Reprocesses a Tdb instance according to the query that may be included in the options. Returns the same Tdb instance if there is no query.''' if not (options.testingStatuses or options.productionStatuses or options.query): return tdb query = None if options.query: query = TdbqParser(TdbqScanner(options.query, options), options).parse() statuses = None if options.productionStatuses: statuses = TdbqPredicate(lambda au: au.status() in [AU.STATUS_RELEASED, AU.STATUS_DOWN, AU.STATUS_SUPERSEDED, AU.STATUS_RETRACTED]) elif options.testingStatuses: statuses = TdbqPredicate(lambda au: au.status() in [AU.STATUS_EXISTS, AU.STATUS_MANIFEST, AU.STATUS_WANTED, AU.STATUS_TESTING, AU.STATUS_NOT_READY, AU.STATUS_TESTED, AU.STATUS_RETESTING, AU.STATUS_READY, AU.STATUS_PRE_RELEASING, AU.STATUS_PRE_RELEASED, AU.STATUS_RELEASING, AU.STATUS_RELEASED, AU.STATUS_DOWN, AU.STATUS_SUPERSEDED, AU.STATUS_RETRACTED]) if query and statuses: prog = TdbqAnd(query, statuses) elif query: prog = query else: prog = statuses newtdb = Tdb() for au in tdb.aus(): if prog.keep_au(au): newtdb.add_au(au) if au.title() not in newtdb.titles(): newtdb.add_title(au.title()) if au.title().publisher() not in newtdb.publishers(): newtdb.add_publisher(au.title().publisher()) return newtdb
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.