rem
stringlengths 0
322k
| add
stringlengths 0
2.05M
| context
stringlengths 8
228k
|
---|---|---|
if reader is not None and \ reader.encryption_meta.is_encrypted(raster_cover): return
|
def get_cover(opf, opf_path, stream, reader=None):
    # Extract cover image data from an EPUB (as a zip stream).
    # Prefers the raster cover declared in the OPF; falls back to
    # rendering the first spine item. Returns raw image bytes, or
    # None when no usable (unencrypted) cover can be found.
    import posixpath
    from calibre.ebooks import render_html_svg_workaround
    from calibre.utils.logging import default_log

    raster_cover = opf.raster_cover
    stream.seek(0)
    zf = ZipFile(stream)

    if raster_cover:
        # Respect DRM: never return data for an encrypted cover item.
        if reader is not None and \
            reader.encryption_meta.is_encrypted(raster_cover):
            return
        opf_dir = posixpath.dirname(opf_path)
        cover_path = posixpath.normpath(posixpath.join(opf_dir, raster_cover))
        try:
            member = zf.getinfo(cover_path)
        except:
            # Declared cover missing from the archive; fall through to
            # the first-spine-item strategy below.
            pass
        else:
            member_file = zf.open(member)
            data = member_file.read()
            member_file.close()
            zf.close()
            return data

    cpage = opf.first_spine_item()
    if not cpage:
        return
    if reader is not None and reader.encryption_meta.is_encrypted(cpage):
        return

    # Render the first spine page to an image as a fallback cover.
    with TemporaryDirectory('_epub_meta') as tdir:
        with CurrentDir(tdir):
            zf.extractall()
            opf_path = opf_path.replace('/', os.sep)
            cpage = os.path.join(tdir, os.path.dirname(opf_path), cpage)
            if not os.path.exists(cpage):
                return
            return render_html_svg_workaround(cpage, default_log)
|
|
if DEBUG: traceback.print_exc()
|
traceback.print_exc()
|
def get_value(self, key, args, kwargs): try: key = key.lower() try: b = self.book.get_user_metadata(key, False) except: if DEBUG: traceback.print_exc() b = None
|
ext = os.path.splitext(iurl)[1] ext = ext[:5]
|
def process_images(self, soup, baseurl):
    # Download every <img src=...> referenced by soup into a local
    # 'images' directory, re-encoding each as JPEG, and rewrite the
    # tags to point at the local copies. Results are memoized in
    # self.imagemap (guarded by self.imagemap_lock) so a URL is only
    # fetched once across threads.
    diskpath = unicode_path(os.path.join(self.current_dir, 'images'))
    if not os.path.exists(diskpath):
        os.mkdir(diskpath)
    c = 0
    for tag in soup.findAll(lambda tag: tag.name.lower() == 'img' and tag.has_key('src')):
        iurl = tag['src']
        if callable(self.image_url_processor):
            iurl = self.image_url_processor(baseurl, iurl)
        if not urlparse.urlsplit(iurl).scheme:
            iurl = urlparse.urljoin(baseurl, iurl, False)
        with self.imagemap_lock:
            if self.imagemap.has_key(iurl):
                tag['src'] = self.imagemap[iurl]
                continue
        try:
            data = self.fetch_url(iurl)
        except Exception:
            self.log.exception('Could not fetch image %s'% iurl)
            continue
        c += 1
        # The image is always re-encoded as JPEG below, so name the file
        # from the counter alone. Embedding the original URL extension
        # produced misleading names like 'img1.png.jpg'.
        fname = ascii_filename('img'+str(c))
        if isinstance(fname, unicode):
            fname = fname.encode('ascii', 'replace')
        imgpath = os.path.join(diskpath, fname+'.jpg')
        try:
            im = Image.open(StringIO(data)).convert('RGBA')
            with self.imagemap_lock:
                self.imagemap[iurl] = imgpath
            with open(imgpath, 'wb') as x:
                im.save(x, 'JPEG')
            tag['src'] = imgpath
        except:
            # Undecodable image data: log and leave the tag untouched.
            traceback.print_exc()
            continue
|
|
fname = ascii_filename('img'+str(c)+ext)
|
fname = ascii_filename('img'+str(c))
|
def process_images(self, soup, baseurl): diskpath = unicode_path(os.path.join(self.current_dir, 'images')) if not os.path.exists(diskpath): os.mkdir(diskpath) c = 0 for tag in soup.findAll(lambda tag: tag.name.lower()=='img' and tag.has_key('src')): iurl = tag['src'] if callable(self.image_url_processor): iurl = self.image_url_processor(baseurl, iurl) ext = os.path.splitext(iurl)[1] ext = ext[:5] if not urlparse.urlsplit(iurl).scheme: iurl = urlparse.urljoin(baseurl, iurl, False) with self.imagemap_lock: if self.imagemap.has_key(iurl): tag['src'] = self.imagemap[iurl] continue try: data = self.fetch_url(iurl) except Exception: self.log.exception('Could not fetch image %s'% iurl) continue c += 1 fname = ascii_filename('img'+str(c)+ext) if isinstance(fname, unicode): fname = fname.encode('ascii', 'replace') imgpath = os.path.join(diskpath, fname+'.jpg') try: im = Image.open(StringIO(data)).convert('RGBA') with self.imagemap_lock: self.imagemap[iurl] = imgpath with open(imgpath, 'wb') as x: im.save(x, 'JPEG') tag['src'] = imgpath except: traceback.print_exc() continue
|
Qt.SplashScreen|Qt.WindowStaysOnTopHint)
|
Qt.SplashScreen)
|
def show_splash_screen(self):
    # Display the startup splash (library icon + loading message),
    # kept on top of other windows, and pump the event loop once so
    # it paints immediately.
    pixmap = QPixmap()
    pixmap.load(I('library.png'))
    self.splash_pixmap = pixmap
    self.splash_screen = QSplashScreen(self.splash_pixmap,
            Qt.SplashScreen|Qt.WindowStaysOnTopHint)
    self.splash_screen.showMessage(_('Starting %s: Loading books...') % __appname__)
    self.splash_screen.show()
    QApplication.instance().processEvents()
|
next = series_start_value series_start_value += 1
|
if self.series_start_value is None: self.series_start_value = series_start_value next = self.series_start_value self.series_start_value += 1
|
def do_one(self, id): remove, add, au, aus, do_aus, rating, pub, do_series, \ do_autonumber, do_remove_format, remove_format, do_swap_ta, \ do_remove_conv, do_auto_author, series, do_series_restart, \ series_start_value, do_title_case, clear_series = self.args
|
authorTag.insert(0, NavigableString("by "))
|
if title['read']: authorTag.insert(0, NavigableString(self.READ_SYMBOL + "by ")) else: authorTag.insert(0, NavigableString(self.NOT_READ_SYMBOL + "by "))
|
def generateHTMLDescriptions(self): # Write each title to a separate HTML file in contentdir self.updateProgressFullStep("'Descriptions'")
|
this_title['notes'] = tag[1:]
|
this_title['notes'] = tag[len(self.opts.note_tag):]
|
def processSpecialTags(self, tags, this_title, opts):
    # Partition a book's tags into special markers and genre tags.
    # Side effects on this_title: sets 'notes' from a note-marker tag
    # and 'read' from the read-marker tag. Returns the list of tags
    # that are neither markers nor excluded genres.
    tag_list = []
    for tag in tags:
        tag = self.convertHTMLEntities(tag)
        if tag.startswith(opts.note_tag):
            # Strip the whole marker prefix, not just the first
            # character, so multi-character note_tag values work.
            this_title['notes'] = tag[len(opts.note_tag):]
        elif tag == opts.read_tag:
            this_title['read'] = True
        elif re.search(opts.exclude_genre, tag):
            continue
        else:
            tag_list.append(tag)
    return tag_list
|
WINDOWS_MAIN_MEM = re.compile(r'CYBOOK_ORIZON__-FD') WINDOWS_CARD_A_MEM = re.compile('CYBOOK_ORIZON__-SD')
|
WINDOWS_MAIN_MEM = re.compile(r'(CYBOOK_ORIZON__-FD)|(FILE-STOR_GADGET)') WINDOWS_CARD_A_MEM = re.compile('(CYBOOK_ORIZON__-SD)|(FILE-STOR_GADGET)')
|
def can_handle(cls, device_info, debug=False): if isunix: return device_info[3] == 'Bookeen' and (device_info[4] == 'Cybook Gen3' or device_info[4] == 'Cybook Opus') return True
|
self.loaded_lang = lang
|
self.loaded_lang = lang_name(lang)
|
def lang_name(l):
    # Canonicalize a language code: bare 'en' maps to the US variant,
    # and the result is lowercased with underscores turned into hyphens
    # (e.g. 'pt_BR' -> 'pt-br').
    canonical = 'en-us' if l == 'en' else l
    return canonical.lower().replace('_', '-')
|
pl_book_ids = [self.book_by_id(i.getAttribute('id')).db_id for i in pl.childNodes if hasattr(i, 'getAttribute')]
|
pl_book_ids = [getattr(self.book_by_id(i), 'db_id', None) for i in db_ids]
|
def reorder_playlists(self): for title in self.tag_order.keys(): pl = self.playlist_by_title(title) if not pl: continue db_ids = [i.getAttribute('id') for i in pl.childNodes if hasattr(i, 'getAttribute')] pl_book_ids = [self.book_by_id(i.getAttribute('id')).db_id for i in pl.childNodes if hasattr(i, 'getAttribute')] map = {} for i, j in zip(pl_book_ids, db_ids): map[i] = j pl_book_ids = [i for i in pl_book_ids if i is not None] ordered_ids = [i for i in self.tag_order[title] if i in pl_book_ids]
|
raw = xml_to_unicode(open(toc, 'rb').read(), assume_utf8=True)[0] soup = NCXSoup(raw)
|
raw = xml_to_unicode(open(toc, 'rb').read(), assume_utf8=True, strip_encoding_pats=True)[0] root = etree.fromstring(raw, parser=etree.XMLParser(recover=True, no_network=True)) xpn = {'re': 'http://exslt.org/regular-expressions'} XPath = functools.partial(etree.XPath, namespaces=xpn) def get_attr(node, default=None, attr='playorder'): for name, val in node.attrib.items(): if name and val and name.lower().endswith(attr): return val return default nl_path = XPath('./*[re:match(local-name(), "navlabel$", "i")]') txt_path = XPath('./*[re:match(local-name(), "text$", "i")]') content_path = XPath('./*[re:match(local-name(), "content$", "i")]') np_path = XPath('./*[re:match(local-name(), "navpoint$", "i")]')
|
def read_ncx_toc(self, toc): self.base_path = os.path.dirname(toc) raw = xml_to_unicode(open(toc, 'rb').read(), assume_utf8=True)[0] soup = NCXSoup(raw)
|
play_order = np.get('playOrder', None) if play_order is None: play_order = int(np.get('playorder', 1))
|
try: play_order = int(get_attr(np, 1)) except: play_order = 1
|
def process_navpoint(np, dest): play_order = np.get('playOrder', None) if play_order is None: play_order = int(np.get('playorder', 1)) href = fragment = text = None nl = np.find(re.compile('navlabel')) if nl is not None: text = u'' for txt in nl.findAll(re.compile('text')): text += u''.join([unicode(s) for s in txt.findAll(text=True)]) content = np.find(re.compile('content')) if content is None or not content.has_key('src') or not txt: return
|
nl = np.find(re.compile('navlabel')) if nl is not None:
|
nl = nl_path(np) if nl: nl = nl[0]
|
def process_navpoint(np, dest): play_order = np.get('playOrder', None) if play_order is None: play_order = int(np.get('playorder', 1)) href = fragment = text = None nl = np.find(re.compile('navlabel')) if nl is not None: text = u'' for txt in nl.findAll(re.compile('text')): text += u''.join([unicode(s) for s in txt.findAll(text=True)]) content = np.find(re.compile('content')) if content is None or not content.has_key('src') or not txt: return
|
for txt in nl.findAll(re.compile('text')): text += u''.join([unicode(s) for s in txt.findAll(text=True)]) content = np.find(re.compile('content')) if content is None or not content.has_key('src') or not txt:
|
for txt in txt_path(nl): text += etree.tostring(txt, method='text', encoding=unicode, with_tail=False) content = content_path(np) if not content or not text:
|
def process_navpoint(np, dest): play_order = np.get('playOrder', None) if play_order is None: play_order = int(np.get('playorder', 1)) href = fragment = text = None nl = np.find(re.compile('navlabel')) if nl is not None: text = u'' for txt in nl.findAll(re.compile('text')): text += u''.join([unicode(s) for s in txt.findAll(text=True)]) content = np.find(re.compile('content')) if content is None or not content.has_key('src') or not txt: return
|
purl = urlparse(unquote(content['src']))
|
content = content[0] src = get_attr(content, attr='src') if src is None: return purl = urlparse(unquote(content.get('src')))
|
def process_navpoint(np, dest): play_order = np.get('playOrder', None) if play_order is None: play_order = int(np.get('playorder', 1)) href = fragment = text = None nl = np.find(re.compile('navlabel')) if nl is not None: text = u'' for txt in nl.findAll(re.compile('text')): text += u''.join([unicode(s) for s in txt.findAll(text=True)]) content = np.find(re.compile('content')) if content is None or not content.has_key('src') or not txt: return
|
for c in np: if 'navpoint' in getattr(c, 'name', ''): process_navpoint(c, nd) nm = soup.find(re.compile('navmap')) if nm is None:
|
for c in np_path(np): process_navpoint(c, nd) nm = XPath('//*[re:match(local-name(), "navmap$", "i")]')(root) if not nm:
|
def process_navpoint(np, dest): play_order = np.get('playOrder', None) if play_order is None: play_order = int(np.get('playorder', 1)) href = fragment = text = None nl = np.find(re.compile('navlabel')) if nl is not None: text = u'' for txt in nl.findAll(re.compile('text')): text += u''.join([unicode(s) for s in txt.findAll(text=True)]) content = np.find(re.compile('content')) if content is None or not content.has_key('src') or not txt: return
|
for elem in nm: if 'navpoint' in getattr(elem, 'name', ''): process_navpoint(elem, self)
|
nm = nm[0] for child in np_path(nm): process_navpoint(child, self)
|
def process_navpoint(np, dest): play_order = np.get('playOrder', None) if play_order is None: play_order = int(np.get('playorder', 1)) href = fragment = text = None nl = np.find(re.compile('navlabel')) if nl is not None: text = u'' for txt in nl.findAll(re.compile('text')): text += u''.join([unicode(s) for s in txt.findAll(text=True)]) content = np.find(re.compile('content')) if content is None or not content.has_key('src') or not txt: return
|
notification=DummyReporter(),
|
report_progress=DummyReporter(),
|
def __init__(self, db, opts, plugin, notification=DummyReporter(), stylesheet="content/stylesheet.css"): self.__opts = opts self.__authors = None self.__basename = opts.basename self.__booksByAuthor = None self.__booksByTitle = None self.__catalogPath = PersistentTemporaryDirectory("_epub_mobi_catalog", prefix='') self.__contentDir = os.path.join(self.catalogPath, "content") self.__creator = opts.creator self.__db = db self.__descriptionClip = opts.descriptionClip self.__error = None self.__generateForKindle = True if (self.opts.fmt == 'mobi' and \ self.opts.output_profile and \ self.opts.output_profile.startswith("kindle")) else False self.__genres = None self.__htmlFileList = [] self.__markerTags = self.getMarkerTags() self.__ncxSoup = None self.__playOrder = 1 self.__plugin = plugin self.__plugin_path = opts.plugin_path self.__progressInt = 0.0 self.__progressString = '' self.__reporter = notification self.__stylesheet = stylesheet self.__thumbs = None self.__title = opts.catalog_title self.__verbose = opts.verbose
|
self.__reporter = notification
|
self.__reporter = report_progress
|
def __init__(self, db, opts, plugin, notification=DummyReporter(), stylesheet="content/stylesheet.css"): self.__opts = opts self.__authors = None self.__basename = opts.basename self.__booksByAuthor = None self.__booksByTitle = None self.__catalogPath = PersistentTemporaryDirectory("_epub_mobi_catalog", prefix='') self.__contentDir = os.path.join(self.catalogPath, "content") self.__creator = opts.creator self.__db = db self.__descriptionClip = opts.descriptionClip self.__error = None self.__generateForKindle = True if (self.opts.fmt == 'mobi' and \ self.opts.output_profile and \ self.opts.output_profile.startswith("kindle")) else False self.__genres = None self.__htmlFileList = [] self.__markerTags = self.getMarkerTags() self.__ncxSoup = None self.__playOrder = 1 self.__plugin = plugin self.__plugin_path = opts.plugin_path self.__progressInt = 0.0 self.__progressString = '' self.__reporter = notification self.__stylesheet = stylesheet self.__thumbs = None self.__title = opts.catalog_title self.__verbose = opts.verbose
|
self.opts.log.info("CatalogBuilder(): Generating %s %s"% \ (self.opts.fmt, "for %s" % self.opts.output_profile if self.opts.output_profile \ else ''))
|
def __init__(self, db, opts, plugin, notification=DummyReporter(), stylesheet="content/stylesheet.css"): self.__opts = opts self.__authors = None self.__basename = opts.basename self.__booksByAuthor = None self.__booksByTitle = None self.__catalogPath = PersistentTemporaryDirectory("_epub_mobi_catalog", prefix='') self.__contentDir = os.path.join(self.catalogPath, "content") self.__creator = opts.creator self.__db = db self.__descriptionClip = opts.descriptionClip self.__error = None self.__generateForKindle = True if (self.opts.fmt == 'mobi' and \ self.opts.output_profile and \ self.opts.output_profile.startswith("kindle")) else False self.__genres = None self.__htmlFileList = [] self.__markerTags = self.getMarkerTags() self.__ncxSoup = None self.__playOrder = 1 self.__plugin = plugin self.__plugin_path = opts.plugin_path self.__progressInt = 0.0 self.__progressString = '' self.__reporter = notification self.__stylesheet = stylesheet self.__thumbs = None self.__title = opts.catalog_title self.__verbose = opts.verbose
|
|
if getattr(self.reporter, 'cancel_requested', False): return 1 if not self.booksByTitle: self.fetchBooksByTitle() if getattr(self.reporter, 'cancel_requested', False): return 1
|
self.fetchBooksByTitle()
|
def buildSources(self): if getattr(self.reporter, 'cancel_requested', False): return 1 if not self.booksByTitle: self.fetchBooksByTitle()
|
if getattr(self.reporter, 'cancel_requested', False): return 1
|
def buildSources(self): if getattr(self.reporter, 'cancel_requested', False): return 1 if not self.booksByTitle: self.fetchBooksByTitle()
|
|
return 0
|
def buildSources(self): if getattr(self.reporter, 'cancel_requested', False): return 1 if not self.booksByTitle: self.fetchBooksByTitle()
|
|
self.opts.log.info(self.updateProgressFullStep("fetchBooksByTitle()"))
|
self.updateProgressFullStep("Fetching database")
|
def fetchBooksByTitle(self): self.opts.log.info(self.updateProgressFullStep("fetchBooksByTitle()"))
|
this_title['date'] = strftime(u'%b %Y', record['pubdate'].timetuple())
|
this_title['date'] = strftime(u'%B %Y', record['pubdate'].timetuple())
|
def fetchBooksByTitle(self): self.opts.log.info(self.updateProgressFullStep("fetchBooksByTitle()"))
|
self.opts.log.info(self.updateProgressFullStep("fetchBooksByAuthor()"))
|
self.updateProgressFullStep("Sorting database by author")
|
def fetchBooksByAuthor(self): # Generate a list of titles sorted by author from the database
|
self.opts.log.info(self.updateProgressFullStep("generateHTMLDescriptions()"))
|
self.updateProgressFullStep("Description")
|
def generateHTMLDescriptions(self): # Write each title to a separate HTML file in contentdir self.opts.log.info(self.updateProgressFullStep("generateHTMLDescriptions()"))
|
self.updateProgressMicroStep("generating book descriptions ...", float(title_num*100/len(self.booksByTitle))/100)
|
self.updateProgressMicroStep("Description %d of %d" % \ (title_num, len(self.booksByTitle)), float(title_num*100/len(self.booksByTitle))/100)
|
def generateHTMLDescriptions(self): # Write each title to a separate HTML file in contentdir self.opts.log.info(self.updateProgressFullStep("generateHTMLDescriptions()"))
|
self.opts.log.info(self.updateProgressFullStep("generateHTMLByTitle()"))
|
self.updateProgressFullStep("Books by Title")
|
def generateHTMLByTitle(self): # Write books by title A-Z to HTML file
|
self.opts.log.info(self.updateProgressFullStep("generateHTMLByAuthor()"))
|
self.updateProgressFullStep("Books by Author")
|
def generateHTMLByAuthor(self): # Write books by author A-Z self.opts.log.info(self.updateProgressFullStep("generateHTMLByAuthor()"))
|
date_string = strftime(u'%b %Y', current_date.timetuple())
|
date_string = strftime(u'%B %Y', current_date.timetuple())
|
def add_books_to_HTML(this_months_list, dtc): if len(this_months_list): date_string = strftime(u'%b %Y', current_date.timetuple()) this_months_list = sorted(this_months_list, key=lambda x:(x['title_sort'], x['title_sort'])) this_months_list = sorted(this_months_list, key=lambda x:(x['author_sort'], x['author_sort'])) # Create a new month anchor pIndexTag = Tag(soup, "p") pIndexTag['class'] = "date_index" aTag = Tag(soup, "a") aTag['name'] = "%s-%s" % (current_date.year, current_date.month) pIndexTag.insert(0,aTag) pIndexTag.insert(1,NavigableString('%s %s' % \ (self.MONTHS[current_date.month],current_date.year))) divTag.insert(dtc,pIndexTag) dtc += 1 current_author = None
|
pIndexTag.insert(1,NavigableString('%s %s' % \ (self.MONTHS[current_date.month],current_date.year)))
|
pIndexTag.insert(1,NavigableString(date_string))
|
def add_books_to_HTML(this_months_list, dtc): if len(this_months_list): date_string = strftime(u'%b %Y', current_date.timetuple()) this_months_list = sorted(this_months_list, key=lambda x:(x['title_sort'], x['title_sort'])) this_months_list = sorted(this_months_list, key=lambda x:(x['author_sort'], x['author_sort'])) # Create a new month anchor pIndexTag = Tag(soup, "p") pIndexTag['class'] = "date_index" aTag = Tag(soup, "a") aTag['name'] = "%s-%s" % (current_date.year, current_date.month) pIndexTag.insert(0,aTag) pIndexTag.insert(1,NavigableString('%s %s' % \ (self.MONTHS[current_date.month],current_date.year))) divTag.insert(dtc,pIndexTag) dtc += 1 current_author = None
|
self.opts.log.info(self.updateProgressFullStep("generateHTMLByDateAdded()"))
|
def add_books_to_HTML(this_months_list, dtc): if len(this_months_list): date_string = strftime(u'%b %Y', current_date.timetuple()) this_months_list = sorted(this_months_list, key=lambda x:(x['title_sort'], x['title_sort'])) this_months_list = sorted(this_months_list, key=lambda x:(x['author_sort'], x['author_sort'])) # Create a new month anchor pIndexTag = Tag(soup, "p") pIndexTag['class'] = "date_index" aTag = Tag(soup, "a") aTag['name'] = "%s-%s" % (current_date.year, current_date.month) pIndexTag.insert(0,aTag) pIndexTag.insert(1,NavigableString('%s %s' % \ (self.MONTHS[current_date.month],current_date.year))) divTag.insert(dtc,pIndexTag) dtc += 1 current_author = None
|
|
self.opts.log.info(self.updateProgressFullStep("generateHTMLByTags()"))
|
self.updateProgressFullStep("Generating Genres")
|
def generateHTMLByTags(self): # Generate individual HTML files for each tag, e.g. Fiction, Nonfiction ... # Note that special tags - ~+*[] - have already been filtered from books[]
|
self.updateProgressMicroStep("generating thumbnails ...",
|
self.updateProgressMicroStep("Thumbnail %d of %d" % \ (i,len(self.booksByTitle)),
|
def generateThumbnails(self): # Generate a thumbnail per cover. If a current thumbnail exists, skip # If a cover doesn't exist, use default # Return list of active thumbs
|
if self.verbose:
|
if False and self.verbose:
|
def generateThumbnails(self): # Generate a thumbnail per cover. If a current thumbnail exists, skip # If a cover doesn't exist, use default # Return list of active thumbs
|
self.opts.log.info(self.updateProgressFullStep("generateOPF()"))
|
self.updateProgressFullStep("Generating OPF")
|
def generateOPF(self):
|
self.opts.log.info(self.updateProgressFullStep("generateNCXHeader()"))
|
self.updateProgressFullStep("NCX header")
|
def generateNCXHeader(self):
|
self.opts.log.info(self.updateProgressFullStep("generateNCXDescriptions()"))
|
self.updateProgressFullStep("NCX descriptions")
|
def generateNCXDescriptions(self, tocTitle):
|
self.opts.log.info(self.updateProgressFullStep("generateNCXByTitle()"))
|
self.updateProgressFullStep("NCX Titles")
|
def generateNCXByTitle(self, tocTitle): self.opts.log.info(self.updateProgressFullStep("generateNCXByTitle()"))
|
self.opts.log.info(self.updateProgressFullStep("generateNCXByAuthor()"))
|
self.updateProgressFullStep("NCX Authors")
|
def generateNCXByAuthor(self, tocTitle): self.opts.log.info(self.updateProgressFullStep("generateNCXByAuthor()"))
|
self.opts.log.info(self.updateProgressFullStep("generateNCXByDateAdded()"))
|
self.updateProgressFullStep("NCX Recently Added")
|
def generateNCXByDateAdded(self, tocTitle): self.opts.log.info(self.updateProgressFullStep("generateNCXByDateAdded()"))
|
datestr = strftime(u'%b %Y', books_by_month[1].timetuple())
|
datestr = strftime(u'%B %Y', books_by_month[1].timetuple())
|
def add_to_master_month_list(current_titles_list): current_titles_list = " • ".join(current_titles_list) current_titles_list = self.generateShortDescription(self.formatNCXText(current_titles_list)) master_month_list.append((current_titles_list, current_date))
|
textTag.insert(0, NavigableString("Books added in " + datestr))
|
textTag.insert(0, NavigableString(datestr))
|
def add_to_master_month_list(current_titles_list): current_titles_list = " • ".join(current_titles_list) current_titles_list = self.generateShortDescription(self.formatNCXText(current_titles_list)) master_month_list.append((current_titles_list, current_date))
|
self.opts.log.info(self.updateProgressFullStep("generateNCXByGenre()"))
|
self.updateProgressFullStep("NCX by Genre")
|
def generateNCXByGenre(self, tocTitle): # Create an NCX section for 'By Genre' # Add each genre as an article # 'tag', 'file', 'authors'
|
self.opts.log.info(self.updateProgressFullStep("writeNCX()"))
|
self.updateProgressFullStep("Writing NCX")
|
def writeNCX(self): self.opts.log.info(self.updateProgressFullStep("writeNCX()"))
|
self.opts.log.info(' %d Genre tags in database (exclude_genre: %s):' % \
|
self.opts.log.info(' %d Genre tags (exclude_genre: %s):' % \
|
def filterDbTags(self, tags): # Remove the special marker tags from the database's tag list, # return sorted list of tags representing valid genres
|
self.reporter(self.progressInt/100., self.progressString) return u"%.2f%% %s" % (self.progressInt, self.progressString)
|
self.reporter(self.progressInt, self.progressString)
|
def updateProgressFullStep(self, description):
|
self.reporter(self.progressInt/100., self.progressString) return u"%.2f%% %s" % (self.progressInt, self.progressString)
|
self.reporter(self.progressInt, self.progressString)
|
def updateProgressMicroStep(self, description, micro_step_pct): step_range = 100/self.total_steps self.progressString = description coarse_progress = float((self.current_step-1)/self.total_steps) fine_progress = float((micro_step_pct*step_range)/100) self.progressInt = coarse_progress + fine_progress self.reporter(self.progressInt/100., self.progressString) return u"%.2f%% %s" % (self.progressInt, self.progressString)
|
log("%s:run" % self.name) log(" path_to_output: %s" % path_to_output) log(" Output format: %s" % self.fmt)
|
log("%s(): Generating %s for %s" % (self.name,self.fmt,opts.output_profile))
|
def run(self, path_to_output, opts, db, notification=DummyReporter()):
|
if key == 'ids': if opts_dict[key]: continue else: log(" %s: (all)" % key) log(" %s: %s" % (key, opts_dict[key]))
|
if key in ['catalog_title','exclude_genre','exclude_tags','note_tag', 'numbers_as_text','read_tag','search_text','sort_by']: log(" %s: %s" % (key, opts_dict[key]))
|
def run(self, path_to_output, opts, db, notification=DummyReporter()):
|
catalog = self.CatalogBuilder(db, opts, self, notification=notification)
|
catalog = self.CatalogBuilder(db, opts, self, report_progress=notification)
|
def run(self, path_to_output, opts, db, notification=DummyReporter()):
|
path = os.path.join(self.rootdir, urlunquote(path))
|
try: path = os.path.join(self.rootdir, urlunquote(path)) except ValueError: return False
|
def exists(self, path): path = os.path.join(self.rootdir, urlunquote(path)) return os.path.isfile(path)
|
rev = ('rev_%4.4x'%c).replace('a', ':') if rev in device_id:
|
rev = 'rev_%4.4x'%c if rev in device_id or rev.replace('a', ':') in device_id:
|
def test_bcd_windows(self, device_id, bcd): if bcd is None or len(bcd) == 0: return True for c in bcd: # Bug in winutil.get_usb_devices converts a to : rev = ('rev_%4.4x'%c).replace('a', ':') if rev in device_id: return True return False
|
self.log.debug('%s saved to %s'%( url, res))
|
self.log.debug(url, 'saved to', res)
|
def start_fetch(self, url):
    # Kick off a recursive fetch rooted at url by wrapping it in a
    # minimal soup containing a single anchor, then delegating to
    # process_links. Returns the local path of the saved root page.
    root_soup = BeautifulSoup(u'<a href="'+url+'" />')
    self.log.debug('Downloading')
    res = self.process_links(root_soup, url, 0, into_dir='')
    self.log.debug('%s saved to %s'%( url, res))
    return res
|
self.log.exception('Could not fetch stylesheet %s'% iurl)
|
self.log.exception('Could not fetch stylesheet ', iurl)
|
def process_stylesheets(self, soup, baseurl):
    # Localize all CSS referenced by soup: <link>/<style> tags with
    # type text/css, plus @import URLs found inside inline styles.
    # Fetched sheets are written to a 'stylesheets' directory and the
    # tags/URLs rewritten to point at the local copies. Results are
    # memoized in self.stylemap (guarded by self.stylemap_lock).
    css_dir = unicode_path(os.path.join(self.current_dir, 'stylesheets'))
    if not os.path.exists(css_dir):
        os.mkdir(css_dir)
    for idx, tag in enumerate(soup.findAll(
            lambda tag: tag.name.lower() in ('link', 'style') and
            tag.has_key('type') and tag['type'].lower() == 'text/css')):
        if tag.has_key('href'):
            # External stylesheet referenced via href.
            iurl = tag['href']
            if not urlparse.urlsplit(iurl).scheme:
                iurl = urlparse.urljoin(baseurl, iurl, False)
            with self.stylemap_lock:
                if self.stylemap.has_key(iurl):
                    tag['href'] = self.stylemap[iurl]
                    continue
            try:
                data = self.fetch_url(iurl)
            except Exception:
                self.log.exception('Could not fetch stylesheet %s'% iurl)
                continue
            stylepath = os.path.join(css_dir, 'style'+str(idx)+'.css')
            with self.stylemap_lock:
                self.stylemap[iurl] = stylepath
            with open(stylepath, 'wb') as x:
                x.write(data)
            tag['href'] = stylepath
        else:
            # Inline <style> block: resolve any @import URLs it contains.
            for ns in tag.findAll(text=True):
                src = str(ns)
                m = self.__class__.CSS_IMPORT_PATTERN.search(src)
                if not m:
                    continue
                iurl = m.group(1)
                if not urlparse.urlsplit(iurl).scheme:
                    iurl = urlparse.urljoin(baseurl, iurl, False)
                with self.stylemap_lock:
                    if self.stylemap.has_key(iurl):
                        ns.replaceWith(src.replace(m.group(1), self.stylemap[iurl]))
                        continue
                try:
                    data = self.fetch_url(iurl)
                except Exception:
                    self.log.exception('Could not fetch stylesheet %s'% iurl)
                    continue
                idx += 1
                stylepath = os.path.join(css_dir, 'style'+str(idx)+'.css')
                with self.stylemap_lock:
                    self.stylemap[iurl] = stylepath
                with open(stylepath, 'wb') as x:
                    x.write(data)
                ns.replaceWith(src.replace(m.group(1), stylepath))
|
self.log.exception('Could not fetch image %s'% iurl)
|
self.log.exception('Could not fetch image ', iurl)
|
def process_images(self, soup, baseurl):
    # Download every <img src=...> referenced by soup into a local
    # 'images' directory, re-encode each as JPEG, and rewrite the tags
    # to point at the local copies. Fetches are memoized in
    # self.imagemap (guarded by self.imagemap_lock).
    img_dir = unicode_path(os.path.join(self.current_dir, 'images'))
    if not os.path.exists(img_dir):
        os.mkdir(img_dir)
    counter = 0
    for node in soup.findAll(lambda t: t.name.lower() == 'img' and t.has_key('src')):
        iurl = node['src']
        if callable(self.image_url_processor):
            iurl = self.image_url_processor(baseurl, iurl)
        if not urlparse.urlsplit(iurl).scheme:
            iurl = urlparse.urljoin(baseurl, iurl, False)
        with self.imagemap_lock:
            if self.imagemap.has_key(iurl):
                node['src'] = self.imagemap[iurl]
                continue
        try:
            data = self.fetch_url(iurl)
            if data == 'GIF89a\x01':
                # Skip empty GIF files as PIL errors on them anyway
                continue
        except Exception:
            self.log.exception('Could not fetch image %s'% iurl)
            continue
        counter += 1
        fname = ascii_filename('img'+str(counter))
        if isinstance(fname, unicode):
            fname = fname.encode('ascii', 'replace')
        imgpath = os.path.join(img_dir, fname+'.jpg')
        try:
            im = Image.open(StringIO(data)).convert('RGBA')
            with self.imagemap_lock:
                self.imagemap[iurl] = imgpath
            with open(imgpath, 'wb') as x:
                im.save(x, 'JPEG')
            node['src'] = imgpath
        except:
            # Undecodable image data: log and leave the tag untouched.
            traceback.print_exc()
            continue
|
raise ValueError('No content at URL %s'%iurl)
|
raise ValueError('No content at URL %r'%iurl)
|
def process_links(self, soup, baseurl, recursion_level, into_dir='links'): res = '' diskpath = os.path.join(self.current_dir, into_dir) if not os.path.exists(diskpath): os.mkdir(diskpath) prev_dir = self.current_dir try: self.current_dir = diskpath tags = list(soup.findAll('a', href=True))
|
pass
|
import traceback traceback.print_exc()
|
def cover_flow_do_sync(self):
    # Sync the library view's current row/selection to the cover
    # browser's current slide, but only after the slide has been
    # stable for more than half a second (debounce).
    self.cover_flow_sync_flag = True
    try:
        settled = (self.cover_flow.isVisible() and
                self.cf_last_updated_at is not None and
                time.time() - self.cf_last_updated_at > 0.5)
        if settled:
            self.cf_last_updated_at = None
            row = self.cover_flow.currentSlide()
            model = self.library_view.model()
            index = model.index(row, 0)
            if self.library_view.currentIndex().row() != row and index.isValid():
                self.cover_flow_sync_flag = False
                self.library_view.scroll_to_row(index.row())
                sel = self.library_view.selectionModel()
                sel.select(index, sel.ClearAndSelect|sel.Rows)
                self.library_view.setCurrentIndex(index)
    except:
        # Best-effort UI sync; swallow any transient model/view errors.
        pass
|
re.compile(r'<meta.*?content=[\'"].*?charset=([^\s\'"]+).*?[\'"].*?>',
|
re.compile(r'''<meta\s+?[^<>]+?content=['"][^'"]*?charset=([-a-z0-9]+)[^'"]*?['"][^<>]*>''',
|
def detect(aBuf):
    # Run the bundled chardet UniversalDetector over the byte buffer
    # and return its result dict (encoding guess + confidence).
    import calibre.ebooks.chardet.universaldetector as universaldetector
    detector = universaldetector.UniversalDetector()
    detector.reset()
    detector.feed(aBuf)
    detector.close()
    return detector.result
|
self.book_on_device(None, None, reset=True)
|
self.book_on_device(None, reset=True)
|
def books_deleted(self, job): ''' Called once deletion is done on the device ''' for view in (self.memory_view, self.card_a_view, self.card_b_view): view.model().deletion_done(job, job.failed) if job.failed: self.device_job_exception(job) return
|
def book_on_device(self, id, format=None, reset=False):
|
def book_on_device(self, id, reset=False):
|
def book_on_device(self, id, format=None, reset=False): ''' Return an indication of whether the given book represented by its db id is on the currently connected device. It returns a 5 element list. The first three elements represent memory locations main, carda, and cardb, and are true if the book is identifiably in that memory. The fourth is a count of how many instances of the book were found across all the memory locations. The fifth is a set of paths to the matching books on the device. ''' loc = [None, None, None, 0, set([])]
|
if db_id is None: db_id = book.db_id
|
def book_on_device(self, id, format=None, reset=False): ''' Return an indication of whether the given book represented by its db id is on the currently connected device. It returns a 5 element list. The first three elements represent memory locations main, carda, and cardb, and are true if the book is identifiably in that memory. The fourth is a count of how many instances of the book were found across all the memory locations. The fifth is a set of paths to the matching books on the device. ''' loc = [None, None, None, 0, set([])]
|
|
def get_library_path():
|
def get_default_library_path(): fname = _('Calibre Library') if isinstance(fname, unicode): try: fname = fname.encode(filesystem_encoding) except: fname = 'Calibre Library' x = os.path.expanduser('~'+os.sep+fname) if not os.path.exists(x): try: os.makedirs(x) except: x = os.path.expanduser('~') return x def get_library_path(parent=None):
|
def get_library_path():
    # Return the filesystem path of the calibre library, prompting the user
    # to choose (and creating) one when no library_path preference exists.
    # Returns None if the chosen directory could not be created.
    library_path = prefs['library_path']
    if library_path is None: # Need to migrate to new database layout
        base = os.path.expanduser('~')
        if iswindows:
            # Prefer the Windows "My Documents" folder as the default base.
            base = plugins['winutil'][0].special_folder_path(
                    plugins['winutil'][0].CSIDL_PERSONAL)
        if not base or not os.path.exists(base):
            # Fall back to Qt's notion of the home directory.
            from PyQt4.Qt import QDir
            base = unicode(QDir.homePath()).replace('/', os.sep)
        candidate = choose_dir(None, 'choose calibre library',
                _('Choose a location for your calibre e-book library'),
                default_dir=base)
        if not candidate:
            # User cancelled the dialog: use a default location instead.
            candidate = os.path.join(base, 'Calibre Library')
        library_path = os.path.abspath(candidate)
    if not os.path.exists(library_path):
        try:
            os.makedirs(library_path)
        except:
            error_dialog(None, _('Failed to create library'),
                    _('Failed to create calibre library at: %r. Aborting.')%library_path,
                    det_msg=traceback.format_exc(), show=True)
            library_path = None
    return library_path
|
error_dialog(None, _('Failed to create library'), _('Failed to create calibre library at: %r. Aborting.')%library_path,
|
error_dialog(parent, _('Failed to create library'), _('Failed to create calibre library at: %r.')%library_path,
|
def get_library_path(): library_path = prefs['library_path'] if library_path is None: # Need to migrate to new database layout base = os.path.expanduser('~') if iswindows: base = plugins['winutil'][0].special_folder_path( plugins['winutil'][0].CSIDL_PERSONAL) if not base or not os.path.exists(base): from PyQt4.Qt import QDir base = unicode(QDir.homePath()).replace('/', os.sep) candidate = choose_dir(None, 'choose calibre library', _('Choose a location for your calibre e-book library'), default_dir=base) if not candidate: candidate = os.path.join(base, 'Calibre Library') library_path = os.path.abspath(candidate) if not os.path.exists(library_path): try: os.makedirs(library_path) except: error_dialog(None, _('Failed to create library'), _('Failed to create calibre library at: %r. Aborting.')%library_path, det_msg=traceback.format_exc(), show=True) library_path = None return library_path
|
library_path = None
|
library_path = choose_dir(parent, 'choose calibre library', _('Choose a location for your new calibre e-book library'), default_dir=get_default_library_path())
|
def get_library_path(): library_path = prefs['library_path'] if library_path is None: # Need to migrate to new database layout base = os.path.expanduser('~') if iswindows: base = plugins['winutil'][0].special_folder_path( plugins['winutil'][0].CSIDL_PERSONAL) if not base or not os.path.exists(base): from PyQt4.Qt import QDir base = unicode(QDir.homePath()).replace('/', os.sep) candidate = choose_dir(None, 'choose calibre library', _('Choose a location for your calibre e-book library'), default_dir=base) if not candidate: candidate = os.path.join(base, 'Calibre Library') library_path = os.path.abspath(candidate) if not os.path.exists(library_path): try: os.makedirs(library_path) except: error_dialog(None, _('Failed to create library'), _('Failed to create calibre library at: %r. Aborting.')%library_path, det_msg=traceback.format_exc(), show=True) library_path = None return library_path
|
fname = _('Calibre Library') if isinstance(fname, unicode): try: fname = fname.encode(filesystem_encoding) except: fname = 'Calibre Library' x = os.path.expanduser('~'+os.sep+fname) if not os.path.exists(x): try: os.makedirs(x) except: x = os.path.expanduser('~')
|
def initialize_db_stage2(self, db, tb): repair_pd = getattr(self, 'repair_pd', None) if repair_pd is not None: repair_pd.cancel()
|
|
default_dir=x)
|
default_dir=get_default_library_path())
|
def initialize_db_stage2(self, db, tb): repair_pd = getattr(self, 'repair_pd', None) if repair_pd is not None: repair_pd.cancel()
|
self.library_path = get_library_path() if self.library_path is None:
|
self.library_path = get_library_path(parent=self.splash_screen) if not self.library_path:
|
def initialize(self, *args): if gprefs.get('show_splash_screen', True): self.show_splash_screen()
|
unmount_device = pyqtSignal()
|
umount_device = pyqtSignal()
|
def location_for_row(self, row):
    """Map a row index in the locations list to a storage location name.

    Rows 0, 1 and 3 are fixed (library, device main memory, card B).
    Any other row refers to the remaining card slot: card A when it is
    present (its free-space entry is not -1), otherwise card B.
    """
    fixed_locations = {0: 'library', 1: 'main', 3: 'cardb'}
    if row in fixed_locations:
        return fixed_locations[row]
    if self.free[1] > -1:
        return 'carda'
    return 'cardb'
|
self.unmount_device.emit()
|
self.umount_device.emit()
|
def eject_clicked(self, *args):
    # Slot for the eject button: ask listeners to safely unmount the
    # currently connected device. Extra slot arguments are ignored.
    self.unmount_device.emit()
|
def generate(): scn = 'calibre_browse_server_sort_' if category: sort_opts = [('rating', _('Average rating')), ('name', _('Name')), ('popularity', _('Popularity'))] scn += 'category' else: scn += 'list' fm = self.db.field_metadata sort_opts, added = [], set([]) for x in fm.sortable_field_keys(): n = fm[x]['name'] if n not in added: added.add(n) sort_opts.append((x, n)) ans = P('content_server/browse/browse.html', data=True).decode('utf-8') ans = ans.replace('{sort_select_label}', xml(_('Sort by')+':')) ans = ans.replace('{sort_cookie_name}', scn) opts = ['<option %svalue="%s">%s</option>' % ( 'selected="selected" ' if k==sort else '', xml(k), xml(n), ) for k, n in sorted(sort_opts, key=operator.itemgetter(1)) if k and n] ans = ans.replace('{sort_select_options}', ('\n'+' '*20).join(opts)) lp = self.db.library_path if isbytestring(lp): lp = force_unicode(lp, filesystem_encoding) if isinstance(ans, unicode): ans = ans.encode('utf-8') ans = ans.replace('{library_name}', xml(os.path.basename(lp))) ans = ans.replace('{library_path}', xml(lp, True)) ans = ans.replace('{initial_search}', initial_search) return ans if self.opts.develop: return generate() if not hasattr(self, '__browse_template__'): self.__browse_template__ = generate()
|
if not hasattr(self, '__browse_template__') or \ self.opts.develop: self.__browse_template__ = \ P('content_server/browse/browse.html', data=True).decode('utf-8') ans = self.__browse_template__ scn = 'calibre_browse_server_sort_' if category: sort_opts = [('rating', _('Average rating')), ('name', _('Name')), ('popularity', _('Popularity'))] scn += 'category' else: scn += 'list' fm = self.db.field_metadata sort_opts, added = [], set([]) for x in fm.sortable_field_keys(): n = fm[x]['name'] if n not in added: added.add(n) sort_opts.append((x, n)) ans = ans.replace('{sort_select_label}', xml(_('Sort by')+':')) ans = ans.replace('{sort_cookie_name}', scn) opts = ['<option %svalue="%s">%s</option>' % ( 'selected="selected" ' if k==sort else '', xml(k), xml(n), ) for k, n in sorted(sort_opts, key=operator.itemgetter(1)) if k and n] ans = ans.replace('{sort_select_options}', ('\n'+' '*20).join(opts)) lp = self.db.library_path if isbytestring(lp): lp = force_unicode(lp, filesystem_encoding) if isinstance(ans, unicode): ans = ans.encode('utf-8') ans = ans.replace('{library_name}', xml(os.path.basename(lp))) ans = ans.replace('{library_path}', xml(lp, True)) ans = ans.replace('{initial_search}', initial_search) return ans
|
def generate(): scn = 'calibre_browse_server_sort_'
|
else: return (key, key, None, None)
|
def format_field_extended(self, key, series_with_index=True): from calibre.ebooks.metadata import authors_to_string ''' returns the tuple (field_name, formatted_value) '''
|
|
_field_metadata = [
|
_field_metadata_prototype = [
|
def __init__(self, icon_dict): for a in self.category_icons: if a not in icon_dict: raise ValueError('Missing category icon [%s]'%a) self[a] = icon_dict[a]
|
self.setZoomFactor(150)
|
def __init__(self, parent=None): pictureflow.PictureFlow.__init__(self, parent, config['cover_flow_queue_length']+1) self.setMinimumSize(QSize(300, 150)) self.setFocusPolicy(Qt.WheelFocus) self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)) self.setZoomFactor(150)
|
|
self.content_server.state_callback = \ Dispatcher(self.content_server_state_changed)
|
if self.content_server is not None: self.content_server.state_callback = \ Dispatcher(self.content_server_state_changed)
|
def do_config(self, checked=False, initial_category='general'): if self.job_manager.has_jobs(): d = error_dialog(self, _('Cannot configure'), _('Cannot configure while there are running jobs.')) d.exec_() return if self.must_restart_before_config: d = error_dialog(self, _('Cannot configure'), _('Cannot configure before calibre is restarted.')) d.exec_() return d = ConfigDialog(self, self.library_view, server=self.content_server, initial_category=initial_category)
|
feeds = [list(reversed(feed)) for feed in feeds]
|
feeds = [list(reversed(list(feed))) for feed in feeds]
|
def build_index(self): self.report_progress(0, _('Fetching feeds...')) try: feeds = feeds_from_index(self.parse_index(), oldest_article=self.oldest_article, max_articles_per_feed=self.max_articles_per_feed, log=self.log) self.report_progress(0, _('Got feeds from index page')) except NotImplementedError: feeds = self.parse_feeds()
|
try: self.view_format.disconnect() except: pass self.view_format = None self.db = None self.pi = None self.cover_data = self.cpixmap = None
|
def disconnect(signal): try: signal.disconnect() except: pass disconnect(self.view_format) for b in ('next_button', 'prev_button'): x = getattr(self, b, None) if x is not None: disconnect(x.clicked)
|
def break_cycles(self):
    # Drop references held by this object so Python can garbage-collect
    # it and the large objects (db, cover data) it points to.
    try:
        self.view_format.disconnect()
    except:
        pass # Fails if view format was never connected
    self.view_format = None
    self.db = None
    self.pi = None
    self.cover_data = self.cpixmap = None
|
for i in range(3):
|
for i in range(5):
|
def break_cycles(self): try: self.view_format.disconnect() except: pass # Fails if view format was never connected self.view_format = None self.db = None self.pi = None self.cover_data = self.cpixmap = None
|
gc.collect()
|
def break_cycles(self): try: self.view_format.disconnect() except: pass # Fails if view format was never connected self.view_format = None self.db = None self.pi = None self.cover_data = self.cpixmap = None
|
|
d.reject()
|
def break_cycles(self): try: self.view_format.disconnect() except: pass # Fails if view format was never connected self.view_format = None self.db = None self.pi = None self.cover_data = self.cpixmap = None
|
|
gc.collect() ''' nmap, omap = {}, {} for x in objects: omap[id(x)] = x for x in nobjects: nmap[id(x)] = x new_ids = set(nmap.keys()) - set(omap.keys()) print "New ids:", len(new_ids) for i in new_ids: o = nmap[i] if o is objects: continue print repr(o)[:1050] refs = gc.get_referrers(o) for r in refs: if r is objects or r is nobjects: continue print '\t', r '''
|
def break_cycles(self): try: self.view_format.disconnect() except: pass # Fails if view format was never connected self.view_format = None self.db = None self.pi = None self.cover_data = self.cpixmap = None
|
|
.format(_('Choose a category to browse by:'), '\n\n'.join(cats))
|
.format(_('Choose a category to browse by:'), u'\n\n'.join(cats))
|
def getter(x): return category_meta[x]['name'].lower()
|
self._model.db.supports_collections() and \ prefs['preserve_user_collections'])
|
callable(getattr(self._model.db, 'supports_collections', None)) and \ self._model.db.supports_collections() and \ prefs['preserve_user_collections'])
|
def contextMenuEvent(self, event):
    # Show the view's context menu; the 'edit collections' entry is only
    # visible when the device database supports collections and the user
    # has asked for user collections to be preserved.
    self.edit_collections_menu.setVisible(
            self._model.db.supports_collections() and \
            prefs['preserve_user_collections'])
    self.context_menu.popup(event.globalPos())
    event.accept()
|
fmt_list.append(format.partition('.')[2])
|
fmt_list.append(format.rpartition('.')[2].lower())
|
def run(self, path_to_output, opts, db, notification=DummyReporter()): self.fmt = path_to_output.rpartition('.')[2] self.notification = notification
|
fwversion = f.readline().split(',')[2]
|
self.fwversion = f.readline().split(',')[2]
|
def books(self, oncard=None, end_session=True): from calibre.ebooks.metadata.meta import path_to_ext
|
if fwversion != '1.0' and fwversion != '1.4':
|
if self.fwversion != '1.0' and self.fwversion != '1.4':
|
def books(self, oncard=None, end_session=True): from calibre.ebooks.metadata.meta import path_to_ext
|
debug_print('Version of firmware: ', fwversion, 'Has kepubs:', self.has_kepubs)
|
debug_print('Version of firmware: ', self.fwversion, 'Has kepubs:', self.has_kepubs)
|
def books(self, oncard=None, end_session=True): from calibre.ebooks.metadata.meta import path_to_ext
|
debug_print('delete_via_sql: ContentID: ', ContentID, 'ContentType: ', ContentType)
|
def delete_via_sql(self, ContentID, ContentType): # Delete Order: # 1) shortcover_page # 2) volume_shorcover # 2) content
|
|
ContentType = 901
|
if self.fwversion == '1.0' or self.fwversion == '1.4' or self.fwversion == '1.7.4': ContentType = 999 else: ContentType = 901
|
def get_content_type_from_extension(self, extension):
    """Translate a lowercase file extension (with leading dot) into the
    ContentType code used by the Kobo device database.

    .kobo -> 6, .pdf/.epub -> 16, plain-text-like formats -> 901,
    anything else -> 999.
    """
    if extension == '.kobo':
        # Kobo books do not have book files. They do have some images though
        return 6
    if extension in ('.pdf', '.epub'):
        return 16
    if extension in ('.rtf', '.txt', '.htm', '.html'):
        return 901
    # Yet another hack: to get around Kobo changing how ContentID is stored
    return 999
|
sval = ts.strftime('%Y-%m-%d')
|
try: sval = ts.strftime('%Y-%m-%d') except: from calibre import strftime sval = strftime('%Y-%m-%d', ts.timetuple())
|
def fix_pubdates(self):
    # Normalize every <dc:date> in the OPF to YYYY-MM-DD form, which is
    # what epubcheck accepts. When self.fix is set, rewrite the dates and
    # save the modified OPF back into the container.
    dirtied = False
    opf = self.container.opf
    for dcdate in opf.xpath('//dc:date',
            namespaces={'dc':'http://purl.org/dc/elements/1.1/'}):
        raw = dcdate.text
        if not raw: raw = ''
        # Dates that fail to parse at all make the epub invalid.
        default = strptime('2000-1-1', '%Y-%m-%d', as_utc=True)
        try:
            ts = parse_date(raw, assume_utc=False, as_utc=True,
                    default=default)
        except:
            raise InvalidEpub('Invalid date set in OPF', raw)
        sval = ts.strftime('%Y-%m-%d')
        if sval != raw:
            self.log.error(
                'OPF contains date', raw, 'that epubcheck does not like')
            if self.fix:
                dcdate.text = sval
                self.log('\tReplaced', raw, 'with', sval)
                dirtied = True
    if dirtied:
        self.container.set(self.container.opf_name, opf)
|
return self.vformat(b['display']['composite_template'], [], kwargs)
|
if key in self.composite_values: return self.composite_values[key] self.composite_values[key] = 'RECURSIVE_COMPOSITE FIELD (S2D) ' + key self.composite_values[key] = \ self.vformat(b['display']['composite_template'], [], kwargs) return self.composite_values[key]
|
def get_value(self, key, args, kwargs):
    # Template-formatter hook: resolve field `key` for the current book.
    # Composite custom columns are expanded through their own template;
    # other fields come from the kwargs lookup. Any failure (unknown key,
    # bad metadata) yields the empty string rather than an exception.
    try:
        b = self.book.get_user_metadata(key, False)
        key = key.lower()
        if b is not None and b['datatype'] == 'composite':
            # NOTE(review): no recursion guard here -- a composite that
            # references itself would recurse; confirm callers.
            return self.vformat(b['display']['composite_template'], [], kwargs)
        if kwargs[key]:
            return self.sanitize(kwargs[key.lower()])
        return ''
    except:
        return ''
|
elif plugboard_any_format_value in plugboards:
|
if dev_name in cpb: cpb = cpb[dev_name] else: cpb = None if cpb is None and plugboard_any_format_value in plugboards:
|
def save_book_to_disk(id, db, root, opts, length): mi = db.get_metadata(id, index_is_id=True) available_formats = db.formats(id, index_is_id=True) if not available_formats: available_formats = [] else: available_formats = [x.lower().strip() for x in available_formats.split(',')] if opts.formats == 'all': asked_formats = available_formats else: asked_formats = [x.lower().strip() for x in opts.formats.split(',')] formats = set(available_formats).intersection(set(asked_formats)) if not formats: return True, id, mi.title components = get_components(opts.template, mi, id, opts.timefmt, length, ascii_filename if opts.asciiize else sanitize_file_name, to_lowercase=opts.to_lowercase, replace_whitespace=opts.replace_whitespace) base_path = os.path.join(root, *components) base_name = os.path.basename(base_path) dirpath = os.path.dirname(base_path) # Don't test for existence first are the test could fail but # another worker process could create the directory before # the call to makedirs try: os.makedirs(dirpath) except BaseException: if not os.path.exists(dirpath): raise cdata = db.cover(id, index_is_id=True) if opts.save_cover: if cdata is not None: with open(base_path+'.jpg', 'wb') as f: f.write(cdata) mi.cover = base_name+'.jpg' else: mi.cover = None if opts.write_opf: opf = metadata_to_opf(mi) with open(base_path+'.opf', 'wb') as f: f.write(opf) if cdata is not None: mi.cover_data = ('jpg', cdata) mi.cover = None written = False for fmt in formats: global plugboard_save_to_disk_value, plugboard_any_format_value dev_name = plugboard_save_to_disk_value plugboards = db.prefs.get('plugboards', {}) cpb = None if fmt in plugboards: cpb = plugboards[fmt] elif plugboard_any_format_value in plugboards: cpb = plugboards[plugboard_any_format_value] # must find a save_to_disk entry for this format if cpb is not None: if dev_name in cpb: cpb = cpb[dev_name] else: cpb = None prints('Using plugboard:', fmt, cpb) data = db.format(id, fmt, index_is_id=True) if data is None: continue 
else: written = True if opts.update_metadata: stream = cStringIO.StringIO() stream.write(data) stream.seek(0) try: if cpb: newmi = mi.deepcopy() newmi.copy_specific_attributes(mi, cpb) else: newmi = mi set_metadata(stream, newmi, fmt) except: traceback.print_exc() stream.seek(0) data = stream.read() fmt_path = base_path+'.'+str(fmt) with open(fmt_path, 'wb') as f: f.write(data) return not written, id, mi.title
|
if cpb is not None:
|
def save_book_to_disk(id, db, root, opts, length): mi = db.get_metadata(id, index_is_id=True) available_formats = db.formats(id, index_is_id=True) if not available_formats: available_formats = [] else: available_formats = [x.lower().strip() for x in available_formats.split(',')] if opts.formats == 'all': asked_formats = available_formats else: asked_formats = [x.lower().strip() for x in opts.formats.split(',')] formats = set(available_formats).intersection(set(asked_formats)) if not formats: return True, id, mi.title components = get_components(opts.template, mi, id, opts.timefmt, length, ascii_filename if opts.asciiize else sanitize_file_name, to_lowercase=opts.to_lowercase, replace_whitespace=opts.replace_whitespace) base_path = os.path.join(root, *components) base_name = os.path.basename(base_path) dirpath = os.path.dirname(base_path) # Don't test for existence first are the test could fail but # another worker process could create the directory before # the call to makedirs try: os.makedirs(dirpath) except BaseException: if not os.path.exists(dirpath): raise cdata = db.cover(id, index_is_id=True) if opts.save_cover: if cdata is not None: with open(base_path+'.jpg', 'wb') as f: f.write(cdata) mi.cover = base_name+'.jpg' else: mi.cover = None if opts.write_opf: opf = metadata_to_opf(mi) with open(base_path+'.opf', 'wb') as f: f.write(opf) if cdata is not None: mi.cover_data = ('jpg', cdata) mi.cover = None written = False for fmt in formats: global plugboard_save_to_disk_value, plugboard_any_format_value dev_name = plugboard_save_to_disk_value plugboards = db.prefs.get('plugboards', {}) cpb = None if fmt in plugboards: cpb = plugboards[fmt] elif plugboard_any_format_value in plugboards: cpb = plugboards[plugboard_any_format_value] # must find a save_to_disk entry for this format if cpb is not None: if dev_name in cpb: cpb = cpb[dev_name] else: cpb = None prints('Using plugboard:', fmt, cpb) data = db.format(id, fmt, index_is_id=True) if data is None: continue 
else: written = True if opts.update_metadata: stream = cStringIO.StringIO() stream.write(data) stream.seek(0) try: if cpb: newmi = mi.deepcopy() newmi.copy_specific_attributes(mi, cpb) else: newmi = mi set_metadata(stream, newmi, fmt) except: traceback.print_exc() stream.seek(0) data = stream.read() fmt_path = base_path+'.'+str(fmt) with open(fmt_path, 'wb') as f: f.write(data) return not written, id, mi.title
|
|
series_index = '%04d%%s' % (integer, str('%0.4f' % fraction).lstrip('0'))
|
series_index = '%04d%s' % (integer, str('%0.4f' % fraction).lstrip('0'))
|
def _update_iTunes_metadata(self, metadata, db_added, lb_added, this_book): ''' ''' if DEBUG: self.log.info(" ITUNES._update_iTunes_metadata()")
|
ids = list(set(ids).difference(_auto_ids))
|
nids = list(set(ids).difference(_auto_ids)) ids = [i for i in ids if i in nids]
|
def send_by_mail(self, to, fmts, delete_from_library, send_ids=None, do_auto_convert=True, specific_format=None): ids = [self.library_view.model().id(r) for r in self.library_view.selectionModel().selectedRows()] if send_ids is None else send_ids if not ids or len(ids) == 0: return files, _auto_ids = self.library_view.model().get_preferred_formats_from_ids(ids, fmts, paths=True, set_metadata=True, specific_format=specific_format, exclude_auto=do_auto_convert) if do_auto_convert: ids = list(set(ids).difference(_auto_ids)) else: _auto_ids = []
|
'%s'%errors
|
'%s'%errors, show=True
|
def emails_sent(self, results, remove=[]):
    # Callback run when email-send jobs finish. `results` is a list of
    # (jobname, exception, traceback) tuples; the title is the part of the
    # jobname after the first ':'. Failures are collected into one error
    # dialog, otherwise the sent titles are flashed in the status bar.
    # NOTE(review): the mutable default `remove=[]` is unused in this body
    # and shared across calls -- confirm callers before changing it.
    errors, good = [], []
    for jobname, exception, tb in results:
        title = jobname.partition(':')[-1]
        if exception is not None:
            errors.append([title, exception, tb])
        else:
            good.append(title)
    if errors:
        errors = '\n'.join([
                '%s\n\n%s\n%s\n' % (title, e, tb) for \
                title, e, tb in errors
                ])
        error_dialog(self, _('Failed to email books'),
                _('Failed to email the following books:'),
                '%s'%errors
                )
    else:
        self.status_bar.showMessage(_('Sent by email:') + ', '.join(good),
                5000)
|
pat = self.OSX_MAIN_MEM_VOL_PAT if pat is not None and len(drives) > 1 and 'main' in drives: if pat.search(drives['main']) is None: main = drives['main'] for x in ('carda', 'cardb'): if x in drives and pat.search(drives[x]): drives['main'] = drives.pop(x) drives[x] = main break
|
def dcmp(x, y):
    '''
    Sorting based on the following scheme:
        - disks without partitions are first
          - sub sorted based on disk number
        - disks with partitions are sorted first on disk number,
          then on partition number
    '''
    # Compare only the final path component (the device node name).
    x = x.rpartition('/')[-1]
    y = y.rpartition('/')[-1]
    # nums() presumably returns (disk_number, partition_number) -- defined
    # elsewhere in this module; 0 partition number means "whole disk".
    x, y = nums(x), nums(y)
    if x[1] == 0 and y[1] > 0:
        return cmp(1, 2)
    if x[1] > 0 and y[1] == 0:
        return cmp(2, 1)
    ans = cmp(x[0], y[0])
    if ans == 0:
        ans = cmp(x[1], y[1])
    return ans
|
|
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\''
|
query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ReadStatus = 1 and ContentID like \'file:///mnt/sd/%\''
|
def update_device_database_collections(self, booklists, collections_attributes, oncard):
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.