[Dataset schema: rem (string, 0-322k chars), add (string, 0-2.05M chars), context (string, 8-228k chars).]
if not sys.argv[1:]:
    print __doc__
    sys.exit(0)
try:
    from optparse import OptionParser
except:
    OptionParser = None

if OptionParser:
    optionParser = OptionParser(version=__version__, usage="%prog [options] url_or_filename_or_-")
    optionParser.set_defaults(format="pprint")
    optionParser.add_option("-A", "--user-agent", dest="agent", metavar="AGENT", help="User-Agent for HTTP URLs")
    optionParser.add_option("-e", "--referer", "--referrer", dest="referrer", metavar="URL", help="Referrer for HTTP URLs")
    optionParser.add_option("-t", "--etag", dest="etag", metavar="TAG", help="ETag/If-None-Match for HTTP URLs")
    optionParser.add_option("-m", "--last-modified", dest="modified", metavar="DATE", help="Last-modified/If-Modified-Since for HTTP URLs (any supported date format)")
    optionParser.add_option("-f", "--format", dest="format", metavar="FORMAT", help="output results in FORMAT (text, pprint)")
    optionParser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="write debugging information to stderr")
    (options, urls) = optionParser.parse_args()
    if options.verbose:
        _debug = 1
    if not urls:
        optionParser.print_help()
        sys.exit(0)
def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=[]):
    '''Parse a feed from a URL, file, stream, or string'''
    result = FeedParserDict()
    result['feed'] = FeedParserDict()
    result['entries'] = []
    if _XML_AVAILABLE:
        result['bozo'] = 0
    if type(handlers) == types.InstanceType:
        handlers = [handlers]
    try:
        f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers)
        data = f.read()
    except Exception, e:
        result['bozo'] = 1
        result['bozo_exception'] = e
        data = ''
        f = None

    # if feed is gzip-compressed, decompress it
    if f and data and hasattr(f, 'headers'):
        if gzip and f.headers.get('content-encoding', '') == 'gzip':
            try:
                data = gzip.GzipFile(fileobj=_StringIO(data)).read()
            except Exception, e:
                # Some feeds claim to be gzipped but they're not, so
                # we get garbage. Ideally, we should re-request the
                # feed without the 'Accept-encoding: gzip' header,
                # but we don't.
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''
        elif zlib and f.headers.get('content-encoding', '') == 'deflate':
            try:
                data = zlib.decompress(data, -zlib.MAX_WBITS)
            except Exception, e:
                result['bozo'] = 1
                result['bozo_exception'] = e
                data = ''

    # save HTTP headers
    if hasattr(f, 'info'):
        info = f.info()
        result['etag'] = info.getheader('ETag')
        last_modified = info.getheader('Last-Modified')
        if last_modified:
            result['modified'] = _parse_date(last_modified)
    if hasattr(f, 'url'):
        result['href'] = f.url
        result['status'] = 200
    if hasattr(f, 'status'):
        result['status'] = f.status
    if hasattr(f, 'headers'):
        result['headers'] = f.headers.dict
    if hasattr(f, 'close'):
        f.close()

    # there are four encodings to keep track of:
    # - http_encoding is the encoding declared in the Content-Type HTTP header
    # - xml_encoding is the encoding declared in the <?xml declaration
    # - sniffed_encoding is the encoding sniffed from the first 4 bytes of the XML data
    # - result['encoding'] is the actual encoding, as per RFC 3023 and a variety of other conflicting specifications
    http_headers = result.get('headers', {})
    result['encoding'], http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type = \
        _getCharacterEncoding(http_headers, data)
    if http_headers and (not acceptable_content_type):
        if http_headers.has_key('content-type'):
            bozo_message = '%s is not an XML media type' % http_headers['content-type']
        else:
            bozo_message = 'no Content-type specified'
        result['bozo'] = 1
        result['bozo_exception'] = NonXMLContentType(bozo_message)

    result['version'], data = _stripDoctype(data)
    baseuri = http_headers.get('content-location', result.get('href'))
    baselang = http_headers.get('content-language', None)

    # if server sent 304, we're done
    if result.get('status', 0) == 304:
        result['version'] = ''
        result['debug_message'] = 'The feed has not changed since you last checked, ' + \
            'so the server sent no data. This is a feature, not a bug!'
        return result

    # if there was a problem downloading, we're done
    if not data:
        return result

    # determine character encoding
    use_strict_parser = 0
    known_encoding = 0
    tried_encodings = []
    # try: HTTP encoding, declared XML encoding, encoding sniffed from BOM
    for proposed_encoding in (result['encoding'], xml_encoding, sniffed_xml_encoding):
        if not proposed_encoding:
            continue
        if proposed_encoding in tried_encodings:
            continue
        tried_encodings.append(proposed_encoding)
        try:
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
            break
        except:
            pass
    # if no luck and we have auto-detection library, try that
    if (not known_encoding) and chardet:
        try:
            proposed_encoding = chardet.detect(data)['encoding']
            if proposed_encoding and (proposed_encoding not in tried_encodings):
                tried_encodings.append(proposed_encoding)
                data = _toUTF8(data, proposed_encoding)
                known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried utf-8 yet, try that
    if (not known_encoding) and ('utf-8' not in tried_encodings):
        try:
            proposed_encoding = 'utf-8'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck and we haven't tried windows-1252 yet, try that
    if (not known_encoding) and ('windows-1252' not in tried_encodings):
        try:
            proposed_encoding = 'windows-1252'
            tried_encodings.append(proposed_encoding)
            data = _toUTF8(data, proposed_encoding)
            known_encoding = use_strict_parser = 1
        except:
            pass
    # if still no luck, give up
    if not known_encoding:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingUnknown( \
            'document encoding unknown, I tried ' + \
            '%s, %s, utf-8, and windows-1252 but nothing worked' % \
            (result['encoding'], xml_encoding))
        result['encoding'] = ''
    elif proposed_encoding != result['encoding']:
        result['bozo'] = 1
        result['bozo_exception'] = CharacterEncodingOverride( \
            'documented declared as %s, but parsed as %s' % \
            (result['encoding'], proposed_encoding))
        result['encoding'] = proposed_encoding

    if not _XML_AVAILABLE:
        use_strict_parser = 0
    if use_strict_parser:
        # initialize the SAX parser
        feedparser = _StrictFeedParser(baseuri, baselang, 'utf-8')
        saxparser = xml.sax.make_parser(PREFERRED_XML_PARSERS)
        saxparser.setFeature(xml.sax.handler.feature_namespaces, 1)
        saxparser.setContentHandler(feedparser)
        saxparser.setErrorHandler(feedparser)
        source = xml.sax.xmlreader.InputSource()
        source.setByteStream(_StringIO(data))
        if hasattr(saxparser, '_ns_stack'):
            # work around bug in built-in SAX parser (doesn't recognize xml: namespace)
            # PyXML doesn't have this problem, and it doesn't have _ns_stack either
            saxparser._ns_stack.append({'http://www.w3.org/XML/1998/namespace': 'xml'})
        try:
            saxparser.parse(source)
        except Exception, e:
            if _debug:
                import traceback
                traceback.print_stack()
                traceback.print_exc()
                sys.stderr.write('xml parsing failed\n')
            result['bozo'] = 1
            result['bozo_exception'] = feedparser.exc or e
            use_strict_parser = 0
    if not use_strict_parser:
        feedparser = _LooseFeedParser(baseuri, baselang, known_encoding and 'utf-8' or '')
        feedparser.feed(data)
    result['feed'] = feedparser.feeddata
    result['entries'] = feedparser.entries
    result['version'] = result['version'] or feedparser.version
    result['namespaces'] = feedparser.namespacesInUse
    return result
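The encoding cascade in parse() above is easiest to see in isolation. A minimal sketch of the same idea, not feedparser's actual helper: to_utf8_bytes is a made-up name and _toUTF8 is simplified to a plain decode/encode round-trip.

def to_utf8_bytes(data, candidates):
    # Try each candidate in turn (HTTP header, XML declaration, sniffed
    # BOM), then fall back to utf-8 and windows-1252, as the cascade
    # above does; the first encoding that decodes cleanly wins.
    tried = []
    for enc in list(candidates) + ['utf-8', 'windows-1252']:
        if not enc or enc in tried:
            continue
        tried.append(enc)
        try:
            return data.decode(enc).encode('utf-8'), enc
        except (UnicodeDecodeError, LookupError):
            pass
    return None, None

print(to_utf8_bytes(b'caf\xe9', ['utf-8']))  # utf-8 fails, windows-1252 succeeds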
from pprint import pprint
serializer = globals().get(options.format.capitalize() + 'Serializer', Serializer)
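The serializer lookup above resolves a class by name from the module namespace, with a default fallback. A self-contained sketch of the idiom; Serializer and TextSerializer here are stand-ins, not the script's real classes:

class Serializer(object):
    pass

class TextSerializer(Serializer):
    pass

def get_serializer(fmt):
    # 'text' -> TextSerializer if defined, else the base Serializer
    return globals().get(fmt.capitalize() + 'Serializer', Serializer)

assert get_serializer('text') is TextSerializer
assert get_serializer('json') is Serializer  # no JsonSerializer defined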
print url
print
result = parse(url)
pprint(result)
print
results = parse(url, etag=options.etag, modified=options.modified,
        agent=options.agent, referrer=options.referrer)
serializer(results).write(sys.stdout)
ap = self.action_preferences
pm.addAction(ap)
pm.addAction(QIcon(I('config.svg')), _('Preferences'), self.do_config)
def __init__(self):
    md = QMenu()
    md.addAction(_('Edit metadata individually'), partial(self.edit_metadata, False, bulk=False))
    md.addSeparator()
    md.addAction(_('Edit metadata in bulk'), partial(self.edit_metadata, False, bulk=True))
    md.addSeparator()
    md.addAction(_('Download metadata and covers'), partial(self.download_metadata, False, covers=True))
    md.addAction(_('Download only metadata'), partial(self.download_metadata, False, covers=False))
    md.addAction(_('Download only covers'), partial(self.download_metadata, False, covers=True, set_metadata=False, set_social_metadata=False))
    md.addAction(_('Download only social metadata'), partial(self.download_metadata, False, covers=False, set_metadata=False, set_social_metadata=True))
    self.metadata_menu = md
warnings = [(x[0], unicode(x[1])) for x in \
warnings = [(x[0], force_unicode(x[1])) for x in \
def hangcheck(self):
    if self.fetcher.is_alive() and \
            time.time() - self.start_time < self.HANG_TIME:
        return
    self._hangcheck.stop()
    try:
        if self.fetcher.is_alive():
            error_dialog(self, _('Could not find metadata'),
                _('The metadata download seems to have stalled. '
                  'Try again later.')).exec_()
            self.terminate()
            return
        self.queue_reject.emit()
        self.model = Matches(self.fetcher.results)
        warnings = [(x[0], unicode(x[1])) for x in \
                self.fetcher.exceptions if x[1] is not None]
        if warnings:
            warnings = '<br>'.join(['<b>%s</b>: %s' % (name, exc) for name, exc in warnings])
            self.warning.setText('<p><b>' + _('Warning') + ':</b>' + \
                    _('Could not fetch metadata from:') + \
                    '<br>' + warnings + '</p>')
            self.warning.setVisible(True)
        if self.model.rowCount() < 1:
            info_dialog(self, _('No metadata found'),
                _('No metadata found, try adjusting the title and author '
                  'and/or removing the ISBN.')).exec_()
            self.reject()
            return
d = error_dialog(self.window, _('Cannot read'),
d = error_dialog(self.parent(), _('Cannot read'),
def select_cover(self):
    files = choose_images(self, 'change cover dialog',
            _('Choose cover for ') + unicode(self.title.text()))
    if not files:
        return
    _file = files[0]
    if _file:
        _file = os.path.abspath(_file)
        if not os.access(_file, os.R_OK):
            d = error_dialog(self.window, _('Cannot read'),
                    _('You do not have permission to read the file: ') + _file)
            d.exec_()
            return
        cf, cover = None, None
        try:
            cf = open(_file, "rb")
            cover = cf.read()
        except IOError, e:
            d = error_dialog(self.window, _('Error reading file'),
                    _("<p>There was an error reading from file: <br /><b>") + _file + "</b></p><br />" + str(e))
            d.exec_()
        if cover:
            pix = QPixmap()
            pix.loadFromData(cover)
            if pix.isNull():
                d = error_dialog(self.window, _('Error reading file'),
                        _file + _(" is not a valid picture"))
                d.exec_()
            else:
                self.cover_path.setText(_file)
                self.cover.setPixmap(pix)
                self.cover_changed = True
                self.cpixmap = pix
                self.cover_data = cover
d = error_dialog(self.window, _('Error reading file'),
d = error_dialog(self.parent(), _('Error reading file'),
self.books = []
if isinstance(root, unicode):
    root = root.encode(filesystem_encoding)
def run(self):
    root = os.path.abspath(self.path)
    self.books = []
    if isinstance(root, unicode):
        root = root.encode(filesystem_encoding)
    try:
        for dirpath in os.walk(root):
            if self.canceled:
                return
            self.emit(SIGNAL('update(PyQt_PyObject)'),
                    _('Searching in')+' '+dirpath[0])
            self.books += list(self.db.find_books_in_directory(dirpath[0],
                self.single_book_per_directory))
    except Exception, err:
        import traceback
        traceback.print_exc()
        try:
            msg = unicode(err)
        except:
            msg = repr(err)
        self.emit(SIGNAL('found(PyQt_PyObject)'), msg)
        return
for dirpath in os.walk(root):
    if self.canceled:
        return
    self.emit(SIGNAL('update(PyQt_PyObject)'),
            _('Searching in')+' '+dirpath[0])
    self.books += list(self.db.find_books_in_directory(dirpath[0],
        self.single_book_per_directory))
except Exception, err:
    import traceback
    traceback.print_exc()
    self.walk(root)
except:
    msg = unicode(err)
except:
    msg = repr(err)
self.emit(SIGNAL('found(PyQt_PyObject)'), msg)
return
    if isinstance(root, unicode):
        root = root.encode(filesystem_encoding)
    self.walk(root)
except Exception, err:
    import traceback
    traceback.print_exc()
    try:
        msg = unicode(err)
    except:
        msg = repr(err)
    self.emit(SIGNAL('found(PyQt_PyObject)'), msg)
    return
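For reference, os.walk() yields (dirname, subdirectories, filenames) tuples, which is why the scanner above indexes dirpath[0] to get the directory name. A quick, self-contained illustration:

import os

for dirname, subdirs, filenames in os.walk('.'):
    print(dirname, len(filenames))  # the scanner only needs dirname
    break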
rows = self.gui.library_view.selectionModel().selectedRows()
rows = list(self.gui.library_view.selectionModel().selectedRows())
def view_specific_format(self, triggered):
    rows = self.gui.library_view.selectionModel().selectedRows()
    if not rows or len(rows) == 0:
        d = error_dialog(self.gui, _('Cannot view'), _('No book selected'))
        d.exec_()
        return
row = rows[0].row()
formats = self.gui.library_view.model().db.formats(row).upper().split(',')
d = ChooseFormatDialog(self.gui, _('Choose the format to view'), formats)
db = self.gui.library_view.model().db
rows = [r.row() for r in rows]
formats = [db.formats(row) for row in rows]
formats = [list(f.upper().split(',')) if f else None for f in formats]
all_fmts = set([])
for x in formats:
    for f in x:
        all_fmts.add(f)
d = ChooseFormatDialog(self.gui, _('Choose the format to view'),
        list(sorted(all_fmts)))
format = d.format()
self.view_format(row, format)
fmt = d.format()
orig_num = len(rows)
rows = [rows[i] for i in range(len(rows)) if formats[i] and fmt in formats[i]]
if self._view_check(len(rows)):
    for row in rows:
        self.view_format(row, fmt)
    if len(rows) < orig_num:
        info_dialog(self.gui, _('Format unavailable'),
                _('Not all the selected books were available in'
                  ' the %s format. You should convert'
                  ' them first.') % fmt, show=True)
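Note that the union loop in the multi-select version iterates 'for f in x' directly, even though formats can contain None entries (books with no formats on disk). A guarded sketch of the same union, with hypothetical data:

formats = [['EPUB', 'MOBI'], None, ['EPUB']]
all_fmts = set()
for fmts in formats:
    for f in fmts or ():  # 'or ()' skips books with no formats
        all_fmts.add(f)
print(sorted(all_fmts))  # -> ['EPUB', 'MOBI']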
ac('edit', _('Edit meta info'), 'edit_input.svg', _('E'))
ac('edit', _('Edit metadata'), 'edit_input.svg', _('E'))
def ac(name, text, icon, shortcut=None, tooltip=None):
    action = QAction(QIcon(I(icon)), text, self)
    text = tooltip if tooltip else text
    action.setToolTip(text)
    action.setStatusTip(text)
    action.setWhatsThis(text)
    action.setAutoRepeat(False)
    action.setObjectName('action_' + name)
    if shortcut:
        action.setShortcut(shortcut)
    setattr(self, 'action_' + name, action)
prints('Updating booklist:', i)
prints('Updating XML Cache:', i)
def update(self, booklists, collections_attributes):
    playlist_map = self.get_playlist_map()
self._details = unicode(err) + '\n\n' + \
try:
    ex = unicode(err)
except:
    try:
        ex = str(err).decode(preferred_encoding, 'replace')
    except:
        ex = repr(err)
self._details = ex + '\n\n' + \
def run(self):
    self.start_work()
    try:
        self.result = self.func(*self.args, **self.kwargs)
        if self._aborted:
            return
    except (Exception, SystemExit), err:
        if self._aborted:
            return
        self.failed = True
        self._details = unicode(err) + '\n\n' + \
            traceback.format_exc()
        self.exception = err
    finally:
        self.job_done()
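The nested try blocks in the replacement exist because stringifying an exception can itself raise: a Python 2 exception carrying byte strings in a legacy encoding makes unicode(err) fail. A condensed Python 2 sketch of the same defensive conversion; error_text is a made-up name and the encoding parameter stands in for calibre's preferred_encoding:

def error_text(err, encoding='utf-8'):
    try:
        return unicode(err)
    except Exception:
        try:
            return str(err).decode(encoding, 'replace')
        except Exception:
            return repr(err)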
if ent.lower().startswith(u'#x'):
    num = int(ent[2:], 16)
    if encoding is None or num > 255:
        return check(my_unichr(num))
    return check(chr(num).decode(encoding))
if ent.startswith(u'#'):
if ent.startswith('#'):
def check(ch): return result_exceptions.get(ch, ch)
    num = int(ent[1:])
except ValueError:
    if ent[1] in ('x', 'X'):
        num = int(ent[2:], 16)
    else:
        num = int(ent[1:])
except:
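Both branches parse the body of a numeric character reference (the text between '&' and ';'). A minimal sketch of the hex/decimal split introduced above; charref_to_int is a hypothetical helper, not the library's:

def charref_to_int(ent):
    # '#xE9' (hex) and '#233' (decimal) both name U+00E9
    if ent[1] in ('x', 'X'):
        return int(ent[2:], 16)
    return int(ent[1:])

assert charref_to_int('#xE9') == charref_to_int('#233') == 0xE9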
href = '/browse/matches/%s/%s'%(q, id_)
href = '/browse/matches/%s/%s'%(quote(q), quote(id_))
def item(i):
    templ = (u'<div title="{4}" class="category-item">'
            '<div class="category-name">{0}</div><div>{1}</div>'
            '<div>{2}'
            '<span class="href">{3}</span></div></div>')
    rating, rstring = render_rating(i.avg_rating)
    name = xml(i.name)
    if datatype == 'rating':
        name = xml(_('%d stars') % int(i.avg_rating))
    id_ = i.id
    if id_ is None:
        id_ = hexlify(force_unicode(name).encode('utf-8'))
    id_ = xml(str(id_))
    desc = ''
    if i.count > 0:
        desc += '[' + _('%d books') % i.count + ']'
    q = i.category
    if not q:
        q = category
    href = '/browse/matches/%s/%s' % (q, id_)
    return templ.format(xml(name), rating, xml(desc), xml(href), rstring)
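The change wraps both path components in quote() so spaces, ampersands and the like survive in the href. An illustration with hypothetical values; the import dance covers both Python generations:

try:
    from urllib import quote         # Python 2
except ImportError:
    from urllib.parse import quote   # Python 3

q, id_ = 'science fiction & fantasy', '42'
print('/browse/matches/%s/%s' % (quote(q), quote(id_)))
# -> /browse/matches/science%20fiction%20%26%20fantasy/42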
.format(xml(x, True), xml(y), xml(_('Browse books by')),
.format(xml(x, True), xml(quote(y)), xml(_('Browse books by')),
def getter(x): return category_meta[x]['name'].lower()
p = PT(text, STRONG(__appname__), A(url, href=url), style='text-align:left')
p = PT(text, STRONG(__appname__), A(url, href=url), style='text-align:left; max-width: 100%; overflow: hidden;')
def _generate(self, bottom, feed, art, number_of_articles_in_feed,
        two_levels, url, __appname__, prefix='', center=True,
        extra_css=None, style=None):
    head = HEAD(TITLE('navbar'))
    if style:
        head.append(STYLE(style, type='text/css'))
    if extra_css:
        head.append(STYLE(extra_css, type='text/css'))
_css_url_re = re.compile(r'url\((.*?)\)', re.I)
_css_url_re = re.compile(r'url\s*\((.*?)\)', re.I)
def CALIBRE(name): return '{%s}%s' % (CALIBRE_NS, name)
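The added \s* lets the pattern tolerate whitespace between url and the opening parenthesis, which turns up in sloppy real-world CSS that the old pattern silently skipped. A demonstration:

import re

_css_url_re = re.compile(r'url\s*\((.*?)\)', re.I)

css = 'background: url (cover.jpg); src: URL("fonts.css")'
print(_css_url_re.findall(css))  # -> ['cover.jpg', '"fonts.css"']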
def iterlinks(root):
def iterlinks(root, find_links_in_css=True):
def iterlinks(root):
    '''
    Iterate over all links in a OEB Document.

    :param root: A valid lxml.etree element.
    '''
    assert etree.iselement(root)
    link_attrs = set(html.defs.link_attrs)
    link_attrs.add(XLINK('href'))

    for el in root.iter():
        attribs = el.attrib
        try:
            tag = el.tag
        except UnicodeDecodeError:
            continue

        if tag == XHTML('object'):
            codebase = None
            ## <object> tags have attributes that are relative to
            ## codebase
            if 'codebase' in attribs:
                codebase = el.get('codebase')
                yield (el, 'codebase', codebase, 0)
            for attrib in 'classid', 'data':
                if attrib in attribs:
                    value = el.get(attrib)
                    if codebase is not None:
                        value = urljoin(codebase, value)
                    yield (el, attrib, value, 0)
            if 'archive' in attribs:
                for match in _archive_re.finditer(el.get('archive')):
                    value = match.group(0)
                    if codebase is not None:
                        value = urljoin(codebase, value)
                    yield (el, 'archive', value, match.start())
        else:
            for attr in attribs:
                if attr in link_attrs:
                    yield (el, attr, attribs[attr], 0)

        if tag == XHTML('style') and el.text:
            for match in _css_url_re.finditer(el.text):
                yield (el, None, match.group(1), match.start(1))
            for match in _css_import_re.finditer(el.text):
                yield (el, None, match.group(1), match.start(1))
        if 'style' in attribs:
            for match in _css_url_re.finditer(attribs['style']):
                yield (el, 'style', match.group(1), match.start(1))
for el, attrib, link, pos in iterlinks(root):
for el, attrib, link, pos in iterlinks(root, find_links_in_css=False):
def rewrite_links(root, link_repl_func, resolve_base_href=False):
    '''
    Rewrite all the links in the document. For each link
    ``link_repl_func(link)`` will be called, and the return value
    will replace the old link.

    Note that links may not be absolute (unless you first called
    ``make_links_absolute()``), and may be internal (e.g.,
    ``'#anchor'``). They can also be values like ``'mailto:email'``
    or ``'javascript:expr'``.

    If the ``link_repl_func`` returns None, the attribute or
    tag text will be removed completely.
    '''
    if resolve_base_href:
        resolve_base_href(root)
    for el, attrib, link, pos in iterlinks(root):
        new_link = link_repl_func(link.strip())
        if new_link == link:
            continue
        if new_link is None:
            # Remove the attribute or element content
            if attrib is None:
                el.text = ''
            else:
                del el.attrib[attrib]
            continue
        if attrib is None:
            new = el.text[:pos] + new_link + el.text[pos+len(link):]
            el.text = new
        else:
            cur = el.attrib[attrib]
            if not pos and len(cur) == len(link):
                # Most common case
                el.attrib[attrib] = new_link
            else:
                new = cur[:pos] + new_link + cur[pos+len(link):]
                el.attrib[attrib] = new
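Hypothetical usage of rewrite_links(), assuming the surrounding module's iterlinks() and namespace helpers are in scope; the document string here is invented for illustration:

from lxml import etree

doc = ('<html xmlns="http://www.w3.org/1999/xhtml"><body>'
       '<a href="HTTP://EXAMPLE.COM/A">x</a></body></html>')
root = etree.fromstring(doc)
rewrite_links(root, lambda href: href.lower())  # lower-case every link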
return self.simple_error('', _('The lookup name must contain only lower case letters, digits and underscores, and start with a letter'))
return self.simple_error('', _('The lookup name must contain only '
    'lower case letters, digits and underscores, and start with a letter'))
def accept(self):
    col = unicode(self.column_name_box.text())
    if not col:
        return self.simple_error('', _('No lookup name was provided'))
    if re.match('^\w*$', col) is None or not col[0].isalpha() or col.lower() != col:
        return self.simple_error('', _('The lookup name must contain only lower case letters, digits and underscores, and start with a letter'))
    if col.endswith('_index'):
        return self.simple_error('', _('Lookup names cannot end with _index, because these names are reserved for the index of a series column.'))
    col_heading = unicode(self.column_heading_box.text())
    col_type = self.column_types[self.column_type_box.currentIndex()]['datatype']
    if col_type == '*text':
        col_type = 'text'
        is_multiple = True
    else:
        is_multiple = False
    if not col_heading:
        return self.simple_error('', _('No column heading was provided'))
    bad_col = False
    if col in self.parent.custcols:
        if not self.editing_col or self.parent.custcols[col]['colnum'] != self.orig_column_number:
            bad_col = True
    if bad_col:
        return self.simple_error('', _('The lookup name %s is already used') % col)
return self.simple_error('', _('Lookup names cannot end with _index, because these names are reserved for the index of a series column.'))
return self.simple_error('', _('Lookup names cannot end with _index, '
    'because these names are reserved for the index of a series column.'))
if not self.editing_col or self.parent.custcols[col]['colnum'] != self.orig_column_number:
if not self.editing_col or \
        self.parent.custcols[col]['colnum'] != self.orig_column_number:
if not self.editing_col or self.parent.custcols[t]['colnum'] != self.orig_column_number:
if not self.editing_col or \
        self.parent.custcols[t]['colnum'] != self.orig_column_number:
return self.simple_error('', _('You must enter a template for composite fields')%col_heading)
return self.simple_error('', _('You must enter a template for composite fields'))
inline = etree.SubElement(inline, XHTML('sup'))
parent = inline
if istate.nest and bstate.inline is not None:
    parent = bstate.inline
    istate.nest = False
inline = etree.SubElement(parent, XHTML('sup'))
def mobimlize_content(self, tag, text, bstate, istates):
    'Convert text content'
    if text or tag != 'br':
        bstate.content = True
    istate = istates[-1]
    para = bstate.para
    if tag in SPECIAL_TAGS and not text:
        para = para if para is not None else bstate.body
    elif para is None or tag in ('td', 'th'):
        body = bstate.body
        if bstate.pbreak:
            etree.SubElement(body, MBP('pagebreak'))
            bstate.pbreak = False
        bstate.istate = None
        bstate.anchor = None
        parent = bstate.nested[-1] if bstate.nested else bstate.body
        indent = istate.indent
        left = istate.left
        if isinstance(indent, basestring):
            indent = 0
        if indent < 0 and abs(indent) < left:
            left += indent
            indent = 0
        elif indent != 0 and abs(indent) < self.profile.fbase:
            indent = (indent / abs(indent)) * self.profile.fbase
        if tag in NESTABLE_TAGS and not istate.rendered:
            para = wrapper = etree.SubElement(
                parent, XHTML(tag), attrib=istate.attrib)
            bstate.nested.append(para)
            if tag == 'li' and len(istates) > 1:
                istates[-2].list_num += 1
                para.attrib['value'] = str(istates[-2].list_num)
        elif tag in NESTABLE_TAGS and istate.rendered:
            para = wrapper = bstate.nested[-1]
        elif left > 0 and indent >= 0:
            para = wrapper = etree.SubElement(parent, XHTML('blockquote'))
            para = wrapper
            emleft = int(round(left / self.profile.fbase)) - 1
            emleft = min((emleft, 10))
            while emleft > 0:
                para = etree.SubElement(para, XHTML('blockquote'))
                emleft -= 1
        else:
            para = wrapper = etree.SubElement(parent, XHTML('p'))
        bstate.inline = bstate.para = para
        vspace = bstate.vpadding + bstate.vmargin
        bstate.vpadding = bstate.vmargin = 0
        if tag not in TABLE_TAGS:
            wrapper.attrib['height'] = self.mobimlize_measure(vspace)
            para.attrib['width'] = self.mobimlize_measure(indent)
        elif tag == 'table' and vspace > 0:
            vspace = int(round(vspace / self.profile.fbase))
            while vspace > 0:
                wrapper.addprevious(etree.Element(XHTML('br')))
                vspace -= 1
        if istate.halign != 'auto' and isinstance(istate.halign, (str, unicode)):
            para.attrib['align'] = istate.halign
    istate.rendered = True
    pstate = bstate.istate
    if tag in CONTENT_TAGS:
        bstate.inline = para
        pstate = bstate.istate = None
        etree.SubElement(para, XHTML(tag), attrib=istate.attrib)
    elif tag in TABLE_TAGS:
        para.attrib['valign'] = 'top'
    if istate.ids:
        last = bstate.body[-1]
        for id in istate.ids:
            last.addprevious(etree.Element(XHTML('a'), attrib={'id': id}))
        istate.ids.clear()
    if not text:
        return
    if not pstate or istate != pstate:
        inline = para
        valign = istate.valign
        fsize = istate.fsize
        href = istate.href
        if not href:
            bstate.anchor = None
        elif pstate and pstate.href == href:
            inline = bstate.anchor
        else:
            inline = etree.SubElement(inline, XHTML('a'), href=href)
            bstate.anchor = inline
        if valign == 'super':
            inline = etree.SubElement(inline, XHTML('sup'))
        elif valign == 'sub':
            inline = etree.SubElement(inline, XHTML('sub'))
        elif fsize != 3:
            inline = etree.SubElement(inline, XHTML('font'), size=str(fsize))
        if istate.family == 'monospace':
            inline = etree.SubElement(inline, XHTML('tt'))
        if istate.italic:
            inline = etree.SubElement(inline, XHTML('i'))
        if istate.bold:
            inline = etree.SubElement(inline, XHTML('b'))
        if istate.bgcolor is not None and istate.bgcolor != 'transparent':
            inline = etree.SubElement(inline, XHTML('span'), bgcolor=istate.bgcolor)
        if istate.fgcolor != 'black':
            inline = etree.SubElement(inline, XHTML('font'), color=unicode(istate.fgcolor))
        if istate.strikethrough:
            inline = etree.SubElement(inline, XHTML('s'))
        if istate.underline:
            inline = etree.SubElement(inline, XHTML('u'))
        bstate.inline = inline
        bstate.istate = istate
    inline = bstate.inline
    content = self.preize_text(text) if istate.preserve else [text]
    for item in content:
        if isinstance(item, basestring):
            if len(inline) == 0:
                inline.text = (inline.text or '') + item
            else:
                last = inline[-1]
                last.tail = (last.tail or '') + item
        else:
            inline.append(item)
inline = etree.SubElement(inline, XHTML('sub'))
parent = inline
if istate.nest and bstate.inline is not None:
    parent = bstate.inline
    istate.nest = False
inline = etree.SubElement(parent, XHTML('sub'))
def get_publisher(self, entry):
    publisher = entry
    publitext = None
    for x in publisher.getiterator('dt'):
        if self.repub.match(x.text):
            publitext = x.getnext().text_content()
            break
    return unicode(publitext)

def get_date(self, entry, verbose):
    date = entry
    d = ''
    for x in date.getiterator('dt'):
        if x.text == 'Date de parution':
            d = x.getnext().text_content()
            break
    if len(d) == 0:
        return None
    try:
        default = utcnow().replace(day=15)
        d = replace_monthsfr(d)
        d = parse_date(d, assume_utc=True, default=default)
    except:
        report(verbose)
        d = None
    return d

def get_ISBN(self, entry):
    isbn = entry
    isbntext = None
    for x in isbn.getiterator('dt'):
        if x.text == 'ISBN':
            isbntext = x.getnext().text_content().replace('-', '')
            if not check_isbn(isbntext):
                return None
            break
    return unicode(isbntext)

def get_language(self, entry):
    language = entry
    langtext = None
    for x in language.getiterator('dt'):
        if x.text == 'Langue':
            langtext = x.getnext().text_content()
            break
    return unicode(langtext)
def get_book_info(self, entry, mi):
    entry = entry.find("dl[@title='Informations sur le livre']")
    for x in entry.getiterator('dt'):
        if x.text == 'ISBN':
            isbntext = x.getnext().text_content().replace('-', '')
            if check_isbn(isbntext):
                mi.isbn = unicode(isbntext)
        elif self.repub.match(x.text):
            mi.publisher = unicode(x.getnext().text_content())
        elif x.text == 'Langue':
            mi.language = unicode(x.getnext().text_content())
        elif x.text == 'Date de parution':
            d = x.getnext().text_content()
            try:
                default = utcnow().replace(day=15)
                d = replace_monthsfr(d)
                d = parse_date(d, assume_utc=True, default=default)
                mi.pubdate = d
            except:
                report(verbose)
    return mi
(re.compile(u'¨\s*(<br.*?>)*\s*e', re.UNICODE), lambda match: u'ë'),
(re.compile(u'¨\s*(<br.*?>)*\s*E', re.UNICODE), lambda match: u'Ë'),
(re.compile(u'¨\s*(<br.*?>)*\s*i', re.UNICODE), lambda match: u'ï'),
(re.compile(u'¨\s*(<br.*?>)*\s*I', re.UNICODE), lambda match: u'Ï'),
(re.compile(u'¨\s*(<br.*?>)*\s*a', re.UNICODE), lambda match: u'ä'),
(re.compile(u'¨\s*(<br.*?>)*\s*A', re.UNICODE), lambda match: u'Ä'),
def __call__(self, data, add_namespace=False):
    from calibre.ebooks.oeb.base import XHTML_CSS_NAMESPACE
    data = self.PAGE_PAT.sub('', data)
    if not add_namespace:
        return data
    ans, namespaced = [], False
    for line in data.splitlines():
        ll = line.lstrip()
        if not (namespaced or ll.startswith('@import') or
                ll.startswith('@charset')):
            ans.append(XHTML_CSS_NAMESPACE.strip())
            namespaced = True
        ans.append(line)
(re.compile(u'`\s*(<br.*?>)*\s*e', re.UNICODE), lambda match: u'è'),
(re.compile(u'`\s*(<br.*?>)*\s*E', re.UNICODE), lambda match: u'È'),
(re.compile(u'`\s*(<br.*?>)*\s*i', re.UNICODE), lambda match: u'ì'),
(re.compile(u'`\s*(<br.*?>)*\s*I', re.UNICODE), lambda match: u'Ì'),
(re.compile(u'`\s*(<br.*?>)*\s*a', re.UNICODE), lambda match: u'à'),
(re.compile(u'`\s*(<br.*?>)*\s*A', re.UNICODE), lambda match: u'À'),
(re.compile(u'´\s*(<br.*?>)*\s*o', re.UNICODE), lambda match: u'ó'),
(re.compile(u'´\s*(<br.*?>)*\s*O', re.UNICODE), lambda match: u'Ó'),
(re.compile(u'´\s*(<br.*?>)*\s*u', re.UNICODE), lambda match: u'ú'),
(re.compile(u'´\s*(<br.*?>)*\s*U', re.UNICODE), lambda match: u'Ú'),
(re.compile(u'´\s*(<br.*?>)*\s*a', re.UNICODE), lambda match: u'á'),
(re.compile(u'´\s*(<br.*?>)*\s*A', re.UNICODE), lambda match: u'Á'),
(re.compile(u'´\s*(<br.*?>)*\s*c', re.UNICODE), lambda match: u'ć'),
(re.compile(u'´\s*(<br.*?>)*\s*C', re.UNICODE), lambda match: u'Ć'),
(re.compile(u'´\s*(<br.*?>)*\s*a', re.UNICODE), lambda match: u'á'),
(re.compile(u'´\s*(<br.*?>)*\s*A', re.UNICODE), lambda match: u'Á'),
(re.compile(u'´\s*(<br.*?>)*\s*o', re.UNICODE), lambda match: u'ó'),
(re.compile(u'´\s*(<br.*?>)*\s*O', re.UNICODE), lambda match: u'Ó'),
(re.compile(u'´\s*(<br.*?>)*\s*n', re.UNICODE), lambda match: u'ń'),
(re.compile(u'´\s*(<br.*?>)*\s*N', re.UNICODE), lambda match: u'Ń'),
(re.compile(u'´\s*(<br.*?>)*\s*s', re.UNICODE), lambda match: u'ś'),
(re.compile(u'´\s*(<br.*?>)*\s*S', re.UNICODE), lambda match: u'Ś'),
(re.compile(u'´\s*(<br.*?>)*\s*u', re.UNICODE), lambda match: u'ú'),
(re.compile(u'´\s*(<br.*?>)*\s*U', re.UNICODE), lambda match: u'Ú'),
(re.compile(u'´\s*(<br.*?>)*\s*z', re.UNICODE), lambda match: u'ź'),
(re.compile(u'´\s*(<br.*?>)*\s*Z', re.UNICODE), lambda match: u'Ź'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*a', re.UNICODE), lambda match: u'â'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*A', re.UNICODE), lambda match: u'Â'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*e', re.UNICODE), lambda match: u'ê'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*E', re.UNICODE), lambda match: u'Ê'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*i', re.UNICODE), lambda match: u'î'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*I', re.UNICODE), lambda match: u'Î'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*e', re.UNICODE), lambda match: u'ê'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*E', re.UNICODE), lambda match: u'Ê'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*i', re.UNICODE), lambda match: u'î'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*I', re.UNICODE), lambda match: u'Î'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*a', re.UNICODE), lambda match: u'â'),
(re.compile(u'ˆ\s*(<br.*?>)*\s*A', re.UNICODE), lambda match: u'Â'),
stream = path if hasattr(path, 'read') else open(path, 'rb')
stream.seek(0)
matches = self.data.get_matches('title', title)
if matches:
    tag_matches = self.data.get_matches('tags', _('Catalog'))
    matches = matches.intersection(tag_matches)
db_id, existing = None, False
if matches:
    db_id = list(matches)[0]
if db_id is None:
    obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
            (title, 'calibre'))
    db_id = obj.lastrowid
    self.data.books_added([db_id], self)
    self.set_path(db_id, index_is_id=True)
    self.conn.commit()
try:
    mi = get_metadata(stream, os.path.splitext(path)[1][1:].lower())
except:
    mi = MetaInformation(title, ['calibre'])
mi.title, mi.authors = title, ['calibre']
mi.tags = [_('Catalog')]
mi.pubdate = mi.timestamp = utcnow()
self.set_metadata(db_id, mi)
self.add_format(db_id, format, stream, index_is_id=True)
if not hasattr(path, 'read'):
    stream.close()
with open(path, 'rb') as stream:
    matches = self.data.get_matches('title', '='+title)
    if matches:
        tag_matches = self.data.get_matches('tags', '='+_('Catalog'))
        matches = matches.intersection(tag_matches)
    db_id = None
    if matches:
        db_id = list(matches)[0]
    if db_id is None:
        obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
                (title, 'calibre'))
        db_id = obj.lastrowid
        self.data.books_added([db_id], self)
        self.set_path(db_id, index_is_id=True)
        self.conn.commit()
    try:
        mi = get_metadata(stream, format)
    except:
        import traceback
        traceback.print_exc()
        mi = MetaInformation(title, ['calibre'])
    stream.seek(0)
    mi.title, mi.authors = title, ['calibre']
    mi.tags = [_('Catalog')]
    mi.pubdate = mi.timestamp = utcnow()
    self.set_metadata(db_id, mi)
    self.add_format(db_id, format, stream, index_is_id=True)
def add_catalog(self, path, title):
    format = os.path.splitext(path)[1][1:].lower()
    stream = path if hasattr(path, 'read') else open(path, 'rb')
    stream.seek(0)
    matches = self.data.get_matches('title', title)
    if matches:
        tag_matches = self.data.get_matches('tags', _('Catalog'))
        matches = matches.intersection(tag_matches)
    db_id, existing = None, False
    if matches:
        db_id = list(matches)[0]
    if db_id is None:
        obj = self.conn.execute('INSERT INTO books(title, author_sort) VALUES (?, ?)',
                (title, 'calibre'))
        db_id = obj.lastrowid
        self.data.books_added([db_id], self)
        self.set_path(db_id, index_is_id=True)
        self.conn.commit()
    try:
        mi = get_metadata(stream, os.path.splitext(path)[1][1:].lower())
    except:
        mi = MetaInformation(title, ['calibre'])
self.javascript('$("body").css("padding-bottom", "%dpx")' % amount)
padding = '%dpx'%amount try: old_padding = unicode(self.javascript('$("body").css("padding-bottom")').toString()) except: old_padding = '' if old_padding != padding: self.javascript('$("body").css("padding-bottom", "%s")' % padding)
def set_bottom_padding(self, amount): self.javascript('$("body").css("padding-bottom", "%dpx")' % amount)
self.func()
self.doit()
def run(self): try: self.func() except Exception, err: import traceback try: err = unicode(err) except: err = repr(err) self.error = (err, traceback.format_exc())
QObject.connect(self.series, SIGNAL('currentIndexChanged(int)'), self.series_changed) QObject.connect(self.series, SIGNAL('editTextChanged(QString)'), self.series_changed) QObject.connect(self.tag_editor_button, SIGNAL('clicked()'), self.tag_editor)
self.series.currentIndexChanged[int].connect(self.series_changed) self.series.editTextChanged.connect(self.series_changed) self.tag_editor_button.clicked.connect(self.tag_editor)
def __init__(self, window, rows, db): QDialog.__init__(self, window) Ui_MetadataBulkDialog.__init__(self) self.setupUi(self) self.db = db self.ids = [db.id(r) for r in rows] self.box_title.setText('<p>' + _('Editing meta information for <b>%d books</b>') % len(rows)) self.write_series = False self.changed = False
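The pair of cells above records calibre's migration from PyQt4's old-style string-based connections to new-style bound-signal connections, which fail loudly on a misspelled attribute instead of failing silently on a typo inside the signature string. A minimal sketch of the two syntaxes, assuming PyQt4 is installed; Counter and on_changed are made-up names, not calibre code:

    from PyQt4.QtCore import QCoreApplication, QObject, SIGNAL, pyqtSignal

    app = QCoreApplication([])

    class Counter(QObject):
        valueChanged = pyqtSignal(int)

    def on_changed(v):
        print v  # Python 2, matching the surrounding code

    c = Counter()
    # Old style: the signal is named by its C++ signature string; a typo
    # in the string is only reported at runtime.
    QObject.connect(c, SIGNAL('valueChanged(int)'), on_changed)
    # New style: a bound-signal attribute; misspelling it raises
    # AttributeError immediately.
    c.valueChanged.connect(on_changed)
    c.valueChanged.emit(3)  # on_changed fires twice, once per connection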
def tag_editor(self):
def tag_editor(self, *args):
def tag_editor(self): d = TagEditor(self, self.db, None) d.exec_() if d.result() == QDialog.Accepted: tag_string = ', '.join(d.tags) self.tags.setText(tag_string) self.tags.update_tags_cache(self.db.all_tags()) self.remove_tags.update_tags_cache(self.db.all_tags())
self.changed = bool(self.ids) for w in getattr(self, 'custom_column_widgets', []): w.gui_val def doit(): for id in self.ids: if do_swap_ta: title = self.db.title(id, index_is_id=True) aum = self.db.authors(id, index_is_id=True) if aum: aum = [a.strip().replace('|', ',') for a in aum.split(',')] new_title = authors_to_string(aum) self.db.set_title(id, new_title, notify=False) if title: new_authors = string_to_authors(title) self.db.set_authors(id, new_authors, notify=False) if au: self.db.set_authors(id, string_to_authors(au), notify=False) if do_auto_author: x = self.db.author_sort_from_book(id, index_is_id=True) if x: self.db.set_author_sort(id, x, notify=False) if aus and do_aus: self.db.set_author_sort(id, aus, notify=False) if rating != -1: self.db.set_rating(id, 2*rating, notify=False) if pub: self.db.set_publisher(id, pub, notify=False) if do_series: next = self.db.get_next_series_num_for(series) self.db.set_series(id, series, notify=False) num = next if do_autonumber and series else 1.0 self.db.set_series_index(id, num, notify=False) if do_remove_format: self.db.remove_format(id, remove_format, index_is_id=True, notify=False) if do_remove_conv: self.db.delete_conversion_options(id, 'PIPE') for w in getattr(self, 'custom_column_widgets', []): w.commit(self.ids) self.db.bulk_modify_tags(self.ids, add=add, remove=remove, notify=False) self.db.clean() self.worker = Worker(doit, self) self.worker.start()
args = (remove, add, au, aus, do_aus, rating, pub, do_series, do_autonumber, do_remove_format, remove_format, do_swap_ta, do_remove_conv, do_auto_author, series)
def accept(self): if len(self.ids) < 1: return QDialog.accept(self)
self.worker.finished.connect(bb.accept, type=Qt.QueuedConnection)
self.worker = Worker(args, self.db, self.ids, Dispatcher(bb.accept, parent=bb)) self.worker.start()
def doit(): for id in self.ids: if do_swap_ta: title = self.db.title(id, index_is_id=True) aum = self.db.authors(id, index_is_id=True) if aum: aum = [a.strip().replace('|', ',') for a in aum.split(',')] new_title = authors_to_string(aum) self.db.set_title(id, new_title, notify=False) if title: new_authors = string_to_authors(title) self.db.set_authors(id, new_authors, notify=False)
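This diff, together with the two before it, moves the bulk-edit loop out of the dialog's accept() and into a Worker thread, with completion delivered back to the GUI through a queued connection so the event loop stays responsive. Worker and Dispatcher are calibre classes; the sketch below only mimics the shape of the pattern with the standard library:

    import threading

    class SimpleWorker(threading.Thread):
        # Runs func on a background thread, records any failure, then
        # invokes on_done (calibre routes this through Dispatcher so the
        # callback executes on the GUI thread).
        def __init__(self, func, on_done):
            threading.Thread.__init__(self)
            self.func, self.on_done, self.error = func, on_done, None

        def run(self):
            try:
                self.func()
            except Exception, err:
                import traceback
                self.error = (repr(err), traceback.format_exc())
            self.on_done()

    w = SimpleWorker(lambda: None, lambda: None)  # placeholder callables
    w.start()
    w.join()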
self.library_view.resizeRowsToContents()
def __init__(self, listener, opts, actions, parent=None): self.preferences_action, self.quit_action = actions self.spare_servers = [] MainWindow.__init__(self, opts, parent) # Initialize fontconfig in a separate thread as this can be a lengthy # process if run for the first time on this machine from calibre.utils.fonts import fontconfig self.fc = fontconfig self.listener = Listener(listener) self.check_messages_timer = QTimer() self.connect(self.check_messages_timer, SIGNAL('timeout()'), self.another_instance_wants_to_talk) self.check_messages_timer.start(1000)
view.resizeRowsToContents()
def metadata_downloaded(self, job): ''' Called once metadata has been read for all books on the device. ''' if job.failed: if isinstance(job.exception, ExpatError): error_dialog(self, _('Device database corrupted'), _(''' <p>The database of books on the reader is corrupted. Try the following: <ol> <li>Unplug the reader. Wait for it to finish regenerating the database (i.e. wait till it is ready to be used). Plug it back in. Now it should work with %(app)s. If not try the next step.</li> <li>Quit %(app)s. Find the file media.xml in the reader's main memory. Delete it. Unplug the reader. Wait for it to regenerate the file. Re-connect it and start %(app)s.</li> </ol> ''')%dict(app=__appname__)).exec_() else: self.device_job_exception(job) return mainlist, cardalist, cardblist = job.result self.memory_view.set_database(mainlist) self.memory_view.set_editable(self.device_manager.device.CAN_SET_METADATA) self.card_a_view.set_database(cardalist) self.card_a_view.set_editable(self.device_manager.device.CAN_SET_METADATA) self.card_b_view.set_database(cardblist) self.card_b_view.set_editable(self.device_manager.device.CAN_SET_METADATA) for view in (self.memory_view, self.card_a_view, self.card_b_view): view.sortByColumn(3, Qt.DescendingOrder) view.read_settings() if not view.restore_column_widths(): view.resizeColumnsToContents() view.resizeRowsToContents() view.resize_on_select = not view.isVisible() self.sync_news() self.sync_catalogs()
view.resizeRowsToContents()
def location_selected(self, location): ''' Called when a location icon is clicked (e.g. Library) ''' page = 0 if location == 'library' else 1 if location == 'main' else 2 if location == 'carda' else 3 self.stack.setCurrentIndex(page) view = self.memory_view if page == 1 else \ self.card_a_view if page == 2 else \ self.card_b_view if page == 3 else None if view: if view.resize_on_select: view.resizeRowsToContents() if not view.restore_column_widths(): view.resizeColumnsToContents() view.resize_on_select = False self.status_bar.reset_info() if location == 'library': self.action_edit.setEnabled(True) self.action_convert.setEnabled(True) self.view_menu.actions()[1].setEnabled(True) self.action_open_containing_folder.setEnabled(True) self.action_sync.setEnabled(True) self.status_bar.tag_view_button.setEnabled(True) self.status_bar.cover_flow_button.setEnabled(True) for action in list(self.delete_menu.actions())[1:]: action.setEnabled(True) else: self.action_edit.setEnabled(False) self.action_convert.setEnabled(False) self.view_menu.actions()[1].setEnabled(False) self.action_open_containing_folder.setEnabled(False) self.action_sync.setEnabled(False) self.status_bar.tag_view_button.setEnabled(False) self.status_bar.cover_flow_button.setEnabled(False) for action in list(self.delete_menu.actions())[1:]: action.setEnabled(False)
which = unhexlify(cid)
which = unhexlify(cid).decode('utf-8')
def browse_matches(self, category=None, cid=None, list_sort=None): if list_sort: list_sort = unquote(list_sort) if not cid: raise cherrypy.HTTPError(404, 'invalid category id: %r'%cid) categories = self.categories_cache()
wand.save(dest+'8') os.rename(dest+'8', dest)
if dest.lower().endswith('.png'): dest += '8' wand.save(dest) if dest.endswith('8'): dest = dest[:-1] os.rename(dest+'8', dest)
def process_pages(self): from calibre.utils.magick import PixelWand for i, wand in enumerate(self.pages): pw = PixelWand() pw.color = 'white'
doc = etree.fromstring(xml, parser=parser)
try: doc = etree.fromstring(xml, parser=parser) except: self.log.warn('Failed to parse XML. Trying to recover') parser = etree.XMLParser(no_network=True, huge_tree=True, recover=True) doc = etree.fromstring(xml, parser=parser)
def convert(self, stream, options, file_ext, log, accelerators): self.log = log self.log('Generating XML') from calibre.ebooks.lrf.lrfparser import LRFDocument d = LRFDocument(stream) d.parse() xml = d.to_xml(write_files=True) if options.verbose > 2: open('lrs.xml', 'wb').write(xml.encode('utf-8')) parser = etree.XMLParser(no_network=True, huge_tree=True) doc = etree.fromstring(xml, parser=parser) char_button_map = {} for x in doc.xpath('//CharButton[@refobj]'): ro = x.get('refobj') jump_button = doc.xpath('//*[@objid="%s"]'%ro) if jump_button: jump_to = jump_button[0].xpath('descendant::JumpTo[@refpage and @refobj]') if jump_to: char_button_map[ro] = '%s.xhtml#%s'%(jump_to[0].get('refpage'), jump_to[0].get('refobj')) plot_map = {} for x in doc.xpath('//Plot[@refobj]'): ro = x.get('refobj') image = doc.xpath('//Image[@objid="%s" and @refstream]'%ro) if image: imgstr = doc.xpath('//ImageStream[@objid="%s" and @file]'% image[0].get('refstream')) if imgstr: plot_map[ro] = imgstr[0].get('file')
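The fix above parses strictly first and only falls back to libxml2's recovery mode when the document is malformed, so well-formed files are never silently mangled. A self-contained sketch of the retry, assuming lxml is installed; the sample string is deliberately broken:

    from lxml import etree

    xml = '<root><unclosed></root>'  # mismatched tag on purpose
    parser = etree.XMLParser(no_network=True)
    try:
        doc = etree.fromstring(xml, parser=parser)
    except etree.XMLSyntaxError:
        # recover=True tells libxml2 to salvage whatever tree it can
        # instead of raising.
        parser = etree.XMLParser(no_network=True, recover=True)
        doc = etree.fromstring(xml, parser=parser)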
extra.append(_('TAGS: %s<br />')%format_tag_string(tags, ',',
extra.append(_('TAGS: %s<br />')%xml(format_tag_string(tags, ',',
def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix): FM = db.FIELD_MAP title = item[FM['title']] if not title: title = _('Unknown') authors = item[FM['authors']] if not authors: authors = _('Unknown') authors = ' & '.join([i.replace('|', ',') for i in authors.split(',')]) extra = [] rating = item[FM['rating']] if rating > 0: rating = u''.join(repeat(u'\u2605', int(rating/2.))) extra.append(_('RATING: %s<br />')%rating) tags = item[FM['tags']] if tags: extra.append(_('TAGS: %s<br />')%format_tag_string(tags, ',', ignore_max=True, no_tag_count=True)) series = item[FM['series']] if series: extra.append(_('SERIES: %s [%s]<br />')%\ (series, fmt_sidx(float(item[FM['series_index']])))) for key in CKEYS: mi = db.get_metadata(item[CFM['id']['rec_index']], index_is_id=True) name, val = mi.format_field(key) if val: datatype = CFM[key]['datatype'] if datatype == 'text' and CFM[key]['is_multiple']: extra.append('%s: %s<br />'%(name, format_tag_string(val, ',', ignore_max=True, no_tag_count=True))) else: extra.append('%s: %s<br />'%(name, val)) comments = item[FM['comments']] if comments: comments = comments_to_html(comments) extra.append(comments) if extra: extra = html_to_lxml('\n'.join(extra)) idm = 'calibre' if version == 0 else 'uuid' id_ = 'urn:%s:%s'%(idm, item[FM['uuid']]) ans = E.entry(TITLE(title), E.author(E.name(authors)), ID(id_), UPDATED(updated)) if len(extra): ans.append(E.content(extra, type='xhtml')) formats = item[FM['formats']] if formats: for fmt in formats.split(','): fmt = fmt.lower() mt = guess_type('a.'+fmt)[0] href = prefix + '/get/%s/%s'%(fmt, item[FM['id']]) if mt: link = E.link(type=mt, href=href) if version > 0: link.set('rel', "http://opds-spec.org/acquisition") ans.append(link) ans.append(E.link(type='image/jpeg', href=prefix+'/get/cover/%s'%item[FM['id']], rel="x-stanza-cover-image" if version == 0 else "http://opds-spec.org/cover")) ans.append(E.link(type='image/jpeg', href=prefix+'/get/thumb/%s'%item[FM['id']], rel="x-stanza-cover-image-thumbnail" if version == 0 else "http://opds-spec.org/thumbnail")) return ans
no_tag_count=True))
no_tag_count=True)))
def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix): FM = db.FIELD_MAP title = item[FM['title']] if not title: title = _('Unknown') authors = item[FM['authors']] if not authors: authors = _('Unknown') authors = ' & '.join([i.replace('|', ',') for i in authors.split(',')]) extra = [] rating = item[FM['rating']] if rating > 0: rating = u''.join(repeat(u'\u2605', int(rating/2.))) extra.append(_('RATING: %s<br />')%rating) tags = item[FM['tags']] if tags: extra.append(_('TAGS: %s<br />')%format_tag_string(tags, ',', ignore_max=True, no_tag_count=True)) series = item[FM['series']] if series: extra.append(_('SERIES: %s [%s]<br />')%\ (series, fmt_sidx(float(item[FM['series_index']])))) for key in CKEYS: mi = db.get_metadata(item[CFM['id']['rec_index']], index_is_id=True) name, val = mi.format_field(key) if val: datatype = CFM[key]['datatype'] if datatype == 'text' and CFM[key]['is_multiple']: extra.append('%s: %s<br />'%(name, format_tag_string(val, ',', ignore_max=True, no_tag_count=True))) else: extra.append('%s: %s<br />'%(name, val)) comments = item[FM['comments']] if comments: comments = comments_to_html(comments) extra.append(comments) if extra: extra = html_to_lxml('\n'.join(extra)) idm = 'calibre' if version == 0 else 'uuid' id_ = 'urn:%s:%s'%(idm, item[FM['uuid']]) ans = E.entry(TITLE(title), E.author(E.name(authors)), ID(id_), UPDATED(updated)) if len(extra): ans.append(E.content(extra, type='xhtml')) formats = item[FM['formats']] if formats: for fmt in formats.split(','): fmt = fmt.lower() mt = guess_type('a.'+fmt)[0] href = prefix + '/get/%s/%s'%(fmt, item[FM['id']]) if mt: link = E.link(type=mt, href=href) if version > 0: link.set('rel', "http://opds-spec.org/acquisition") ans.append(link) ans.append(E.link(type='image/jpeg', href=prefix+'/get/cover/%s'%item[FM['id']], rel="x-stanza-cover-image" if version == 0 else "http://opds-spec.org/cover")) ans.append(E.link(type='image/jpeg', href=prefix+'/get/thumb/%s'%item[FM['id']], rel="x-stanza-cover-image-thumbnail" if version == 0 else "http://opds-spec.org/thumbnail")) return ans
(series,
(xml(series),
def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix): FM = db.FIELD_MAP title = item[FM['title']] if not title: title = _('Unknown') authors = item[FM['authors']] if not authors: authors = _('Unknown') authors = ' & '.join([i.replace('|', ',') for i in authors.split(',')]) extra = [] rating = item[FM['rating']] if rating > 0: rating = u''.join(repeat(u'\u2605', int(rating/2.))) extra.append(_('RATING: %s<br />')%rating) tags = item[FM['tags']] if tags: extra.append(_('TAGS: %s<br />')%format_tag_string(tags, ',', ignore_max=True, no_tag_count=True)) series = item[FM['series']] if series: extra.append(_('SERIES: %s [%s]<br />')%\ (series, fmt_sidx(float(item[FM['series_index']])))) for key in CKEYS: mi = db.get_metadata(item[CFM['id']['rec_index']], index_is_id=True) name, val = mi.format_field(key) if val: datatype = CFM[key]['datatype'] if datatype == 'text' and CFM[key]['is_multiple']: extra.append('%s: %s<br />'%(name, format_tag_string(val, ',', ignore_max=True, no_tag_count=True))) else: extra.append('%s: %s<br />'%(name, val)) comments = item[FM['comments']] if comments: comments = comments_to_html(comments) extra.append(comments) if extra: extra = html_to_lxml('\n'.join(extra)) idm = 'calibre' if version == 0 else 'uuid' id_ = 'urn:%s:%s'%(idm, item[FM['uuid']]) ans = E.entry(TITLE(title), E.author(E.name(authors)), ID(id_), UPDATED(updated)) if len(extra): ans.append(E.content(extra, type='xhtml')) formats = item[FM['formats']] if formats: for fmt in formats.split(','): fmt = fmt.lower() mt = guess_type('a.'+fmt)[0] href = prefix + '/get/%s/%s'%(fmt, item[FM['id']]) if mt: link = E.link(type=mt, href=href) if version > 0: link.set('rel', "http://opds-spec.org/acquisition") ans.append(link) ans.append(E.link(type='image/jpeg', href=prefix+'/get/cover/%s'%item[FM['id']], rel="x-stanza-cover-image" if version == 0 else "http://opds-spec.org/cover")) ans.append(E.link(type='image/jpeg', href=prefix+'/get/thumb/%s'%item[FM['id']], rel="x-stanza-cover-image-thumbnail" if version == 0 else "http://opds-spec.org/thumbnail")) return ans
extra.append('%s: %s<br />'%(name, format_tag_string(val, ',',
extra.append('%s: %s<br />'%(xml(name), xml(format_tag_string(val, ',',
def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix): FM = db.FIELD_MAP title = item[FM['title']] if not title: title = _('Unknown') authors = item[FM['authors']] if not authors: authors = _('Unknown') authors = ' & '.join([i.replace('|', ',') for i in authors.split(',')]) extra = [] rating = item[FM['rating']] if rating > 0: rating = u''.join(repeat(u'\u2605', int(rating/2.))) extra.append(_('RATING: %s<br />')%rating) tags = item[FM['tags']] if tags: extra.append(_('TAGS: %s<br />')%format_tag_string(tags, ',', ignore_max=True, no_tag_count=True)) series = item[FM['series']] if series: extra.append(_('SERIES: %s [%s]<br />')%\ (series, fmt_sidx(float(item[FM['series_index']])))) for key in CKEYS: mi = db.get_metadata(item[CFM['id']['rec_index']], index_is_id=True) name, val = mi.format_field(key) if val: datatype = CFM[key]['datatype'] if datatype == 'text' and CFM[key]['is_multiple']: extra.append('%s: %s<br />'%(name, format_tag_string(val, ',', ignore_max=True, no_tag_count=True))) else: extra.append('%s: %s<br />'%(name, val)) comments = item[FM['comments']] if comments: comments = comments_to_html(comments) extra.append(comments) if extra: extra = html_to_lxml('\n'.join(extra)) idm = 'calibre' if version == 0 else 'uuid' id_ = 'urn:%s:%s'%(idm, item[FM['uuid']]) ans = E.entry(TITLE(title), E.author(E.name(authors)), ID(id_), UPDATED(updated)) if len(extra): ans.append(E.content(extra, type='xhtml')) formats = item[FM['formats']] if formats: for fmt in formats.split(','): fmt = fmt.lower() mt = guess_type('a.'+fmt)[0] href = prefix + '/get/%s/%s'%(fmt, item[FM['id']]) if mt: link = E.link(type=mt, href=href) if version > 0: link.set('rel', "http://opds-spec.org/acquisition") ans.append(link) ans.append(E.link(type='image/jpeg', href=prefix+'/get/cover/%s'%item[FM['id']], rel="x-stanza-cover-image" if version == 0 else "http://opds-spec.org/cover")) ans.append(E.link(type='image/jpeg', href=prefix+'/get/thumb/%s'%item[FM['id']], rel="x-stanza-cover-image-thumbnail" if version == 0 else "http://opds-spec.org/thumbnail")) return ans
no_tag_count=True)))
no_tag_count=True))))
def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix): FM = db.FIELD_MAP title = item[FM['title']] if not title: title = _('Unknown') authors = item[FM['authors']] if not authors: authors = _('Unknown') authors = ' & '.join([i.replace('|', ',') for i in authors.split(',')]) extra = [] rating = item[FM['rating']] if rating > 0: rating = u''.join(repeat(u'\u2605', int(rating/2.))) extra.append(_('RATING: %s<br />')%rating) tags = item[FM['tags']] if tags: extra.append(_('TAGS: %s<br />')%format_tag_string(tags, ',', ignore_max=True, no_tag_count=True)) series = item[FM['series']] if series: extra.append(_('SERIES: %s [%s]<br />')%\ (series, fmt_sidx(float(item[FM['series_index']])))) for key in CKEYS: mi = db.get_metadata(item[CFM['id']['rec_index']], index_is_id=True) name, val = mi.format_field(key) if val: datatype = CFM[key]['datatype'] if datatype == 'text' and CFM[key]['is_multiple']: extra.append('%s: %s<br />'%(name, format_tag_string(val, ',', ignore_max=True, no_tag_count=True))) else: extra.append('%s: %s<br />'%(name, val)) comments = item[FM['comments']] if comments: comments = comments_to_html(comments) extra.append(comments) if extra: extra = html_to_lxml('\n'.join(extra)) idm = 'calibre' if version == 0 else 'uuid' id_ = 'urn:%s:%s'%(idm, item[FM['uuid']]) ans = E.entry(TITLE(title), E.author(E.name(authors)), ID(id_), UPDATED(updated)) if len(extra): ans.append(E.content(extra, type='xhtml')) formats = item[FM['formats']] if formats: for fmt in formats.split(','): fmt = fmt.lower() mt = guess_type('a.'+fmt)[0] href = prefix + '/get/%s/%s'%(fmt, item[FM['id']]) if mt: link = E.link(type=mt, href=href) if version > 0: link.set('rel', "http://opds-spec.org/acquisition") ans.append(link) ans.append(E.link(type='image/jpeg', href=prefix+'/get/cover/%s'%item[FM['id']], rel="x-stanza-cover-image" if version == 0 else "http://opds-spec.org/cover")) ans.append(E.link(type='image/jpeg', href=prefix+'/get/thumb/%s'%item[FM['id']], rel="x-stanza-cover-image-thumbnail" if version == 0 else "http://opds-spec.org/thumbnail")) return ans
extra.append('%s: %s<br />'%(name, val))
extra.append('%s: %s<br />'%(xml(name), xml(unicode(val))))
def ACQUISITION_ENTRY(item, version, db, updated, CFM, CKEYS, prefix): FM = db.FIELD_MAP title = item[FM['title']] if not title: title = _('Unknown') authors = item[FM['authors']] if not authors: authors = _('Unknown') authors = ' & '.join([i.replace('|', ',') for i in authors.split(',')]) extra = [] rating = item[FM['rating']] if rating > 0: rating = u''.join(repeat(u'\u2605', int(rating/2.))) extra.append(_('RATING: %s<br />')%rating) tags = item[FM['tags']] if tags: extra.append(_('TAGS: %s<br />')%format_tag_string(tags, ',', ignore_max=True, no_tag_count=True)) series = item[FM['series']] if series: extra.append(_('SERIES: %s [%s]<br />')%\ (series, fmt_sidx(float(item[FM['series_index']])))) for key in CKEYS: mi = db.get_metadata(item[CFM['id']['rec_index']], index_is_id=True) name, val = mi.format_field(key) if val: datatype = CFM[key]['datatype'] if datatype == 'text' and CFM[key]['is_multiple']: extra.append('%s: %s<br />'%(name, format_tag_string(val, ',', ignore_max=True, no_tag_count=True))) else: extra.append('%s: %s<br />'%(name, val)) comments = item[FM['comments']] if comments: comments = comments_to_html(comments) extra.append(comments) if extra: extra = html_to_lxml('\n'.join(extra)) idm = 'calibre' if version == 0 else 'uuid' id_ = 'urn:%s:%s'%(idm, item[FM['uuid']]) ans = E.entry(TITLE(title), E.author(E.name(authors)), ID(id_), UPDATED(updated)) if len(extra): ans.append(E.content(extra, type='xhtml')) formats = item[FM['formats']] if formats: for fmt in formats.split(','): fmt = fmt.lower() mt = guess_type('a.'+fmt)[0] href = prefix + '/get/%s/%s'%(fmt, item[FM['id']]) if mt: link = E.link(type=mt, href=href) if version > 0: link.set('rel', "http://opds-spec.org/acquisition") ans.append(link) ans.append(E.link(type='image/jpeg', href=prefix+'/get/cover/%s'%item[FM['id']], rel="x-stanza-cover-image" if version == 0 else "http://opds-spec.org/cover")) ans.append(E.link(type='image/jpeg', href=prefix+'/get/thumb/%s'%item[FM['id']], rel="x-stanza-cover-image-thumbnail" if version == 0 else "http://opds-spec.org/thumbnail")) return ans
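The run of near-identical diffs above all make the same change: every user-supplied field (tags, series, custom-column names and values) is passed through calibre's xml() escaping helper before being interpolated into markup, so a '&' or '<' in a book's metadata cannot corrupt the OPDS feed. The same idea with the standard library; the sample value is made up:

    from xml.sax.saxutils import escape

    series = 'Tom & Jerry <uncut>'
    extra = 'SERIES: %s<br />' % escape(series)
    # extra == 'SERIES: Tom &amp; Jerry &lt;uncut&gt;<br />'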
cover += '\0' * (size - len(cover)) self.cover_record[:] = cover
if len(cover) <= size: cover += '\0' * (size - len(cover)) self.cover_record[:] = cover
def update_exth_record(rec): recs.append(rec) if rec[0] in self.original_exth_records: self.original_exth_records.pop(rec[0])
thumbnail += '\0' * (size - len(thumbnail)) self.thumbnail_record[:] = thumbnail return
if len(thumbnail) <= size: thumbnail += '\0' * (size - len(thumbnail)) self.thumbnail_record[:] = thumbnail return
def update_exth_record(rec): recs.append(rec) if rec[0] in self.original_exth_records: self.original_exth_records.pop(rec[0])
return len(extensions) == 1 and iter(extensions).next() in ('jpg', 'jpeg', 'png')
comic_extensions = set(['jpg', 'jpeg', 'png']) return len(extensions - comic_extensions) == 0
def is_comic(list_of_names): extensions = set([x.rpartition('.')[-1].lower() for x in list_of_names]) return len(extensions) == 1 and iter(extensions).next() in ('jpg', 'jpeg', 'png')
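The replacement test uses set difference, so an archive mixing .jpg and .png pages is still recognised as a comic, where the old single-extension check rejected it. Worked through on a small example:

    names = ['p1.jpg', 'p2.PNG', 'p3.jpeg']
    extensions = set([x.rpartition('.')[-1].lower() for x in names])
    allowed = set(['jpg', 'jpeg', 'png'])
    # An empty difference means nothing outside the allowed set was found.
    print len(extensions - allowed) == 0  # True; the old check gave False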
THUMB_WIDTH = 75 THUMB_HEIGHT = 100
def numberTranslate(self): hundredsNumber = 0 thousandsNumber = 0 hundredsString = "" thousandsString = "" resultString = "" self.suffix = ''
self.__totalSteps = 10.0
self.__totalSteps = 11.0
def __init__(self, db, opts, plugin, report_progress=DummyReporter(), stylesheet="content/stylesheet.css"): self.__opts = opts self.__authors = None self.__basename = opts.basename self.__booksByAuthor = None self.__booksByTitle = None self.__catalogPath = PersistentTemporaryDirectory("_epub_mobi_catalog", prefix='') self.__contentDir = os.path.join(self.catalogPath, "content") self.__currentStep = 0.0 self.__creator = opts.creator self.__db = db self.__descriptionClip = opts.descriptionClip self.__error = None self.__generateForKindle = True if (self.opts.fmt == 'mobi' and \ self.opts.output_profile and \ self.opts.output_profile.startswith("kindle")) else False self.__genres = None self.__genre_tags_dict = None self.__htmlFileList = [] self.__markerTags = self.getMarkerTags() self.__ncxSoup = None self.__playOrder = 1 self.__plugin = plugin self.__progressInt = 0.0 self.__progressString = '' self.__reporter = report_progress self.__stylesheet = stylesheet self.__thumbs = None self.__title = opts.catalog_title self.__totalSteps = 10.0 self.__verbose = opts.verbose
if self.opts.fmt == 'mobi': imgTag['style'] = 'width: %dpx; height:%dpx;' % (self.THUMB_WIDTH, self.THUMB_HEIGHT)
def generateHTMLDescriptions(self): # Write each title to a separate HTML file in contentdir self.updateProgressFullStep("'Descriptions'")
factor = 2 if self.opts.fmt == 'epub' else 1 pw.MagickThumbnailImage(thumb, factor*self.THUMB_WIDTH, factor*self.THUMB_HEIGHT)
pw.MagickThumbnailImage(thumb, self.thumbWidth, self.thumbHeight)
def generateThumbnail(self, title, image_dir, thumb_file): import calibre.utils.PythonMagickWand as pw try: img = pw.NewMagickWand() if img < 0: raise RuntimeError('generateThumbnail(): Cannot create wand') # Read the cover if not pw.MagickReadImage(img, title['cover'].encode(filesystem_encoding)): self.opts.log.error('generateThumbnail(): Failed to read cover image from: %s' % title['cover']) raise IOError thumb = pw.CloneMagickWand(img) if thumb < 0: self.opts.log.error('generateThumbnail(): Cannot clone cover') raise RuntimeError # img, width, height factor = 2 if self.opts.fmt == 'epub' else 1 pw.MagickThumbnailImage(thumb, factor*self.THUMB_WIDTH, factor*self.THUMB_HEIGHT) pw.MagickWriteImage(thumb, os.path.join(image_dir, thumb_file)) pw.DestroyMagickWand(thumb) pw.DestroyMagickWand(img) except IOError: self.opts.log.error("generateThumbnail(): IOError with %s" % title['title']) except RuntimeError: self.opts.log.error("generateThumbnail(): RuntimeError with %s" % title['title'])
formatter = (lambda x:u'\u2605'*int(round(x/2.)))
formatter = (lambda x:u'\u2605'*int(x/2))
def get_categories(self, sort='name', ids=None, icon_map=None): self.books_list_filter.change([] if not ids else ids)
re.compile(r'<\?[^<>]+encoding=[\'"](.*?)[\'"][^<>]*>',
re.compile(r'<\?[^<>]+encoding\s*=\s*[\'"](.*?)[\'"][^<>]*>',
def detect(aBuf): import calibre.ebooks.chardet.universaldetector as universaldetector u = universaldetector.UniversalDetector() u.reset() u.feed(aBuf) u.close() return u.result
re.compile(r'''<meta\s+?[^<>]+?content=['"][^'"]*?charset=([-a-z0-9]+)[^'"]*?['"][^<>]*>''',
re.compile(r'''<meta\s+?[^<>]+?content\s*=\s*['"][^'"]*?charset=([-a-z0-9]+)[^'"]*?['"][^<>]*>''',
def detect(aBuf): import calibre.ebooks.chardet.universaldetector as universaldetector u = universaldetector.UniversalDetector() u.reset() u.feed(aBuf) u.close() return u.result
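Both charset-sniffing regexes are loosened from a literal 'encoding=' and 'content=' to a tolerant '\s*=\s*', so declarations written with spaces around the equals sign are still detected. A quick check of the loosened XML-declaration pattern:

    import re

    pat = re.compile(r'<\?[^<>]+encoding\s*=\s*[\'"](.*?)[\'"][^<>]*>')
    m = pat.search('<?xml version="1.0" encoding = "utf-8"?>')
    print m.group(1)  # utf-8; the stricter pattern found no match here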
val = fm['is_multiple'].join(res)
val = res if fm['is_custom']: val = fm['is_multiple'].join(val)
def apply_pattern(val): try: return self.s_r_obj.sub(self.s_r_func, val) except: return val
if field == 'authors': val = string_to_authors(val)
def apply_pattern(val): try: return self.s_r_obj.sub(self.s_r_func, val) except: return val
if self.use_author_sort and book.author_sort is not None: record.set('author', clean(book.author_sort))
if self.use_author_sort: if book.author_sort: aus = book.author_sort else: debug_print('Author_sort is None for book', book.lpath) aus = authors_to_sort_string(book.authors) record.set('author', clean(aus))
def clean(x): if isbytestring(x): x = x.decode(preferred_encoding, 'replace') x = x.replace(u'\0', '') return x
msg = MIMEText(text)
msg = MIMEText(text, 'plain', 'utf-8')
def create_mail(from_, to, subject, text=None, attachment_data=None, attachment_type=None, attachment_name=None): assert text or attachment_data from email.mime.multipart import MIMEMultipart outer = MIMEMultipart() outer['Subject'] = subject outer['To'] = to outer['From'] = from_ outer.preamble = 'You will not see this in a MIME-aware mail reader.\n' if text is not None: from email.mime.text import MIMEText msg = MIMEText(text) outer.attach(msg) if attachment_data is not None: from email.mime.base import MIMEBase assert attachment_data and attachment_name try: maintype, subtype = attachment_type.split('/', 1) except AttributeError: maintype, subtype = 'application', 'octet-stream' msg = MIMEBase(maintype, subtype) msg.set_payload(attachment_data) encoders.encode_base64(msg) msg.add_header('Content-Disposition', 'attachment', filename=attachment_name) outer.attach(msg) return outer.as_string()
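MIMEText defaults to us-ascii, so the one-argument form in the removed cell breaks as soon as the mail body contains non-ASCII text; passing an explicit charset fixes the header and the payload encoding together. A standard-library demonstration:

    from email.mime.text import MIMEText

    body = u'na\xefve r\xe9sum\xe9'.encode('utf-8')
    msg = MIMEText(body, 'plain', 'utf-8')
    print msg['Content-Type']  # text/plain; charset="utf-8"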
def get_cover(opf, opf_path, stream):
def get_cover(opf, opf_path, stream, reader=None):
def get_cover(opf, opf_path, stream): import posixpath from calibre.ebooks import render_html_svg_workaround from calibre.utils.logging import default_log raster_cover = opf.raster_cover stream.seek(0) zf = ZipFile(stream) if raster_cover: base = posixpath.dirname(opf_path) cpath = posixpath.normpath(posixpath.join(base, raster_cover)) try: member = zf.getinfo(cpath) except: pass else: f = zf.open(member) data = f.read() f.close() zf.close() return data cpage = opf.first_spine_item() if not cpage: return with TemporaryDirectory('_epub_meta') as tdir: with CurrentDir(tdir): zf.extractall() opf_path = opf_path.replace('/', os.sep) cpage = os.path.join(tdir, os.path.dirname(opf_path), cpage) if not os.path.exists(cpage): return return render_html_svg_workaround(cpage, default_log)
cdata = get_cover(reader.opf, reader.opf_path, stream)
cdata = get_cover(reader.opf, reader.opf_path, stream, reader=reader)
def get_metadata(stream, extract_cover=True): """ Return metadata as a :class:`MetaInformation` object """ stream.seek(0) reader = OCFZipReader(stream) mi = MetaInformation(reader.opf) if extract_cover: try: cdata = get_cover(reader.opf, reader.opf_path, stream) if cdata is not None: mi.cover_data = ('jpg', cdata) except: import traceback traceback.print_exc() return mi
self.setHtml(u'<table>%s</table>'%rows)
self.setHtml(templ%(u'<table>%s</table>'%rows))
def _show_data(self, rows, comments): if self.vertical: if comments: rows += u'<tr><td colspan="2">%s</td></tr>'%comments self.setHtml(u'<table>%s</table>'%rows) else: left_pane = u'<table>%s</table>'%rows right_pane = u'<div>%s</div>'%comments self.setHtml(u'<table><tr><td valign="top" ' 'style="padding-right:2em">%s</td><td valign="top">%s</td></tr></table>' % (left_pane, right_pane))
self.setHtml(u'<table><tr><td valign="top" '
self.setHtml(templ%(u'<table><tr><td valign="top" '
def _show_data(self, rows, comments): if self.vertical: if comments: rows += u'<tr><td colspan="2">%s</td></tr>'%comments self.setHtml(u'<table>%s</table>'%rows) else: left_pane = u'<table>%s</table>'%rows right_pane = u'<div>%s</div>'%comments self.setHtml(u'<table><tr><td valign="top" ' 'style="padding-right:2em">%s</td><td valign="top">%s</td></tr></table>' % (left_pane, right_pane))
% (left_pane, right_pane))
% (left_pane, right_pane)))
def _show_data(self, rows, comments): if self.vertical: if comments: rows += u'<tr><td colspan="2">%s</td></tr>'%comments self.setHtml(u'<table>%s</table>'%rows) else: left_pane = u'<table>%s</table>'%rows right_pane = u'<div>%s</div>'%comments self.setHtml(u'<table><tr><td valign="top" ' 'style="padding-right:2em">%s</td><td valign="top">%s</td></tr></table>' % (left_pane, right_pane))
except ValueError:
except:
def compute_locale_info_for_parse_date(): try: dt = datetime.strptime('1/5/2000', "%x") except ValueError: try: dt = datetime.strptime('1/5/01', '%x') except: return False if dt.month == 5: return True return False
doc = etree.fromstring(raw)
doc = etree.fromstring(raw.replace('\0', ''))
def convert(self, stream, options, file_ext, log, accelerators): from calibre.ebooks.metadata.opf2 import OPFCreator from calibre.ebooks.metadata.meta import get_metadata from calibre.ebooks.oeb.base import XLINK_NS NAMESPACES = {'f':FB2NS, 'l':XLINK_NS} log.debug('Parsing XML...') raw = stream.read() try: doc = etree.fromstring(raw) except etree.XMLSyntaxError: doc = etree.fromstring(raw.replace('& ', '&amp;')) self.extract_embedded_content(doc) log.debug('Converting XML to HTML...') ss = open(P('templates/fb2.xsl'), 'rb').read() if options.no_inline_fb2_toc: log('Disabling generation of inline FB2 TOC') ss = re.compile(r'<!-- BUILD TOC -->.*<!-- END BUILD TOC -->', re.DOTALL).sub('', ss)
def open(self): USBMS.open(self)
def post_open_callback(self):
def open(self): USBMS.open(self)
self.orig_timestamp = timestamp
self.orig_timestamp = timestamp.astimezone(utc_tz)
def __init__(self, window, row, db, accepted_callback=None, cancel_all=False): ResizableDialog.__init__(self, window) self.bc_box.layout().setAlignment(self.cover, Qt.AlignCenter|Qt.AlignHCenter) self.cancel_all = False base = unicode(self.author_sort.toolTip()) self.ok_aus_tooltip = '<p>' + textwrap.fill(base+'<br><br>'+ _(' The green color indicates that the current ' 'author sort matches the current author')) self.bad_aus_tooltip = '<p>'+textwrap.fill(base + '<br><br>'+ _(' The red color indicates that the current ' 'author sort does not match the current author'))
obsolete = '###OBSOLETE--DON\'T USE ME###'
def migrate_preference(name, default): obsolete = '###OBSOLETE--DON\'T USE ME###' ans = self.prefs.get(name, None) if ans is None: ans = prefs[name] if ans in (None, obsolete): ans = default prefs[name] = obsolete self.prefs[name] = ans
if ans in (None, obsolete):
if ans is None:
def migrate_preference(name, default): obsolete = '###OBSOLETE--DON\'T USE ME###' ans = self.prefs.get(name, None) if ans is None: ans = prefs[name] if ans in (None, obsolete): ans = default prefs[name] = obsolete self.prefs[name] = ans
prefs[name] = obsolete self.prefs[name] = ans
prefs[name] = self.prefs[name] = ans
def migrate_preference(name, default): obsolete = '###OBSOLETE--DON\'T USE ME###' ans = self.prefs.get(name, None) if ans is None: ans = prefs[name] if ans in (None, obsolete): ans = default prefs[name] = obsolete self.prefs[name] = ans
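The triplets above walk back the sentinel scheme: originally the old global prefs store was poisoned with an OBSOLETE marker after migration and the marker was treated as "unset"; the fix simply writes the resolved value back to both stores. A generic sketch of the fixed approach, with plain dicts standing in for calibre's two prefs stores:

    def migrate(old, new, name, default):
        # Resolve from the new store first, then the old, then the
        # default, and write the answer back to both; no sentinel needed.
        ans = new.get(name)
        if ans is None:
            ans = old.get(name)
        if ans is None:
            ans = default
        old[name] = new[name] = ans
        return ans

    old, new = {'toolbar_size': 'large'}, {}
    print migrate(old, new, 'toolbar_size', 'medium')  # large (copied over)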
break
continue
def book_on_device(self, id, format=None, reset=False): loc = [None, None, None]
title_words = title.split(' ')
title_words = title.split() stop_words = ['a','an','the']
def generateSortTitle(self, title): # Convert the actual title to a string suitable for sorting. # Convert numbers to strings, ignore leading stop words # The 21-Day Consciousness Cleanse
hit = re.search('[0-9]+',word) if hit :
if i==0 and re.search('[0-9]+',word):
def generateSortTitle(self, title): # Convert the actual title to a string suitable for sorting. # Convert numbers to strings, ignore leading stop words # The 21-Day Consciousness Cleanse
if attr == 'series':
if attr == 'series' or \ ('series' in collection_attributes and getattr(book, 'series', None) == category):
def get_collections(self, collection_attributes): from calibre.devices.usbms.driver import debug_print debug_print('Starting get_collections:', prefs['manage_device_metadata']) collections = {} series_categories = set([]) # This map of sets is used to avoid linear searches when testing for # book equality collections_lpaths = {} for book in self: # Make sure we can identify this book via the lpath lpath = getattr(book, 'lpath', None) if lpath is None: continue # Decide how we will build the collections. The default: leave the # book in all existing collections. Do not add any new ones. attrs = ['device_collections'] if getattr(book, '_new_book', False): if prefs['manage_device_metadata'] == 'manual': # Ensure that the book is in all the book's existing # collections plus all metadata collections attrs += collection_attributes else: # For new books, both 'on_send' and 'on_connect' do the same # thing. The book's existing collections are ignored. Put # the book in collections defined by its metadata. attrs = collection_attributes elif prefs['manage_device_metadata'] == 'on_connect': # For existing books, modify the collections only if the user # specified 'on_connect' attrs = collection_attributes for attr in attrs: attr = attr.strip() val = getattr(book, attr, None) if not val: continue if isbytestring(val): val = val.decode(preferred_encoding, 'replace') if isinstance(val, (list, tuple)): val = list(val) elif isinstance(val, unicode): val = [val] for category in val: if attr == 'tags' and len(category) > 1 and \ category[0] == '[' and category[-1] == ']': continue if category not in collections: collections[category] = [] collections_lpaths[category] = set() if lpath not in collections_lpaths[category]: collections_lpaths[category].add(lpath) collections[category].append(book) if attr == 'series': series_categories.add(category) # Sort collections for category, books in collections.items(): def tgetter(x): return getattr(x, 'title_sort', 'zzzz') books.sort(cmp=lambda x,y:cmp(tgetter(x), tgetter(y))) if category in series_categories: # Ensures books are sub sorted by title def getter(x): return getattr(x, 'series_index', sys.maxint) books.sort(cmp=lambda x,y:cmp(getter(x), getter(y))) return collections
'''absorbed = set([])
def coalesce_regions(self): # find contiguous sets of small regions # absorb into a neighboring region (prefer the one with number of cols # closer to the avg number of cols in the set, if equal use larger # region) # merge contiguous regions that can contain each other '''absorbed = set([]) found = True while found: found = False for i, region in enumerate(self.regions): if region.is_small: found = True regions = [] for j in range(i+1, len(self.regions)): if self.regions[j].is_small: regions.append(self.regions[j]) else: break prev = None if i == 0 else i-1 next = j if self.regions[j] not in regions else None ''' pass
prev = None if i == 0 else i-1 next = j if self.regions[j] not in regions else None ''' pass
prev_region = None if i == 0 else i-1 next_region = j if self.regions[j] not in regions else None if prev_region is None and next_region is not None: absorb_into = next_region elif next_region is None and prev_region is not None: absorb_into = prev_region elif prev_region is None and next_region is None: if len(regions) > 1: absorb_into = regions[0] regions = regions[1:] else: absorb_into = None else: absorb_into = prev_region if next_region.line_count >= prev_region.line_count: avg_column_count = sum([len(r.columns) for r in regions])/float(len(regions)) if next_region.line_count > prev_region.line_count \ or abs(avg_column_count - len(prev_region.columns)) \ > abs(avg_column_count - len(next_region.columns)): absorb_into = next_region if absorb_into is not None: absorb_into.absorb_region(regions) absorbed.update(regions) i = j for region in absorbed: self.regions.remove(region)
def coalesce_regions(self): # find contiguous sets of small regions # absorb into a neighboring region (prefer the one with number of cols # closer to the avg number of cols in the set, if equal use larger # region) # merge contiguous regions that can contain each other '''absorbed = set([]) found = True while found: found = False for i, region in enumerate(self.regions): if region.is_small: found = True regions = [] for j in range(i+1, len(self.regions)): if self.regions[j].is_small: regions.append(self.regions[j]) else: break prev = None if i == 0 else i-1 next = j if self.regions[j] not in regions else None ''' pass
if not mi.title:
if not mi.title or mi.title == _('Unknown'):
def _run(self): self.key = get_isbndb_key() if not self.key: self.key = None self.fetched_metadata = {} self.failures = {} with self.worker: for id, mi in self.metadata.items(): args = {} if mi.isbn: args['isbn'] = mi.isbn else: if not mi.title: self.failures[id] = \ (str(id), _('Book has neither title nor ISBN')) continue args['title'] = mi.title if mi.authors: args['author'] = mi.authors[0] if self.key: args['isbndb_key'] = self.key results, exceptions = search(**args) if results: fmi = results[0] self.fetched_metadata[id] = fmi if fmi.isbn and self.get_covers: self.worker.jobs.put(fmi.isbn) if (not config['overwrite_author_title_metadata']): fmi.authors = mi.authors fmi.author_sort = mi.author_sort fmi.title = mi.title mi.smart_update(fmi) if mi.isbn and self.get_social_metadata: self.social_metadata_exceptions = get_social_metadata(mi) if mi.rating: mi.rating *= 2 if not self.get_social_metadata: mi.tags = [] else: self.failures[id] = (mi.title, _('No matches found for this book')) self.commit_covers()
if mi.authors:
if mi.authors and mi.authors[0] != _('Unknown'):
def _run(self): self.key = get_isbndb_key() if not self.key: self.key = None self.fetched_metadata = {} self.failures = {} with self.worker: for id, mi in self.metadata.items(): args = {} if mi.isbn: args['isbn'] = mi.isbn else: if not mi.title: self.failures[id] = \ (str(id), _('Book has neither title nor ISBN')) continue args['title'] = mi.title if mi.authors: args['author'] = mi.authors[0] if self.key: args['isbndb_key'] = self.key results, exceptions = search(**args) if results: fmi = results[0] self.fetched_metadata[id] = fmi if fmi.isbn and self.get_covers: self.worker.jobs.put(fmi.isbn) if (not config['overwrite_author_title_metadata']): fmi.authors = mi.authors fmi.author_sort = mi.author_sort fmi.title = mi.title mi.smart_update(fmi) if mi.isbn and self.get_social_metadata: self.social_metadata_exceptions = get_social_metadata(mi) if mi.rating: mi.rating *= 2 if not self.get_social_metadata: mi.tags = [] else: self.failures[id] = (mi.title, _('No matches found for this book')) self.commit_covers()
esize = 3 + force_int(size)
try: esize = 3 + force_int(size) except: esize = 3
def force_int(raw): return int(re.search(r'([0-9+-]+)', raw).group(1))
text = re.sub(r'(?imsu)(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*(?P<strong>(<p>)*\s*<strong>.+?</strong>\s*(</p>)*)', lambda mo: '</section><section>%s<title>%s</title>' % (mo.group('anchor'), mo.group('strong')), text) text = re.sub(r'(?imsu)<p>\s*(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*</p>\s*(?P<strong>(<p>)*\s*<strong>.+?</strong>\s*(</p>)*)', lambda mo: '</section><section>%s<title>%s</title>' % (mo.group('anchor'), mo.group('strong')), text)
def remove_p(t): t = t.replace('<p>', '') t = t.replace('</p>', '') return t text = re.sub(r'(?imsu)(<p>)\s*(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*(</p>)\s*(<p>)\s*(?P<strong><strong>.+?</strong>)\s*(</p>)', lambda mo: '</section><section>%s<title><p>%s</p></title>' % (mo.group('anchor'), remove_p(mo.group('strong'))), text) text = re.sub(r'(?imsu)(<p>)\s*(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*(</p>)\s*(?P<strong><strong>.+?</strong>)', lambda mo: '</section><section>%s<title><p>%s</p></title>' % (mo.group('anchor'), remove_p(mo.group('strong'))), text) text = re.sub(r'(?imsu)(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*(<p>)\s*(?P<strong><strong>.+?</strong>)\s*(</p>)', lambda mo: '</section><section>%s<title><p>%s</p></title>' % (mo.group('anchor'), remove_p(mo.group('strong'))), text) text = re.sub(r'(?imsu)(<p>)\s*(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*(?P<strong><strong>.+?</strong>)\s*(</p>)', lambda mo: '</section><section>%s<title><p>%s</p></title>' % (mo.group('anchor'), remove_p(mo.group('strong'))), text) text = re.sub(r'(?imsu)(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*(?P<strong><strong>.+?</strong>)', lambda mo: '</section><section>%s<title><p>%s</p></title>' % (mo.group('anchor'), remove_p(mo.group('strong'))), text)
def sectionize_chapters(self, text): text = re.sub(r'(?imsu)(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*(?P<strong>(<p>)*\s*<strong>.+?</strong>\s*(</p>)*)', lambda mo: '</section><section>%s<title>%s</title>' % (mo.group('anchor'), mo.group('strong')), text) text = re.sub(r'(?imsu)<p>\s*(?P<anchor><a\s+id="calibre_link-\d+"\s*/>)\s*</p>\s*(?P<strong>(<p>)*\s*<strong>.+?</strong>\s*(</p>)*)', lambda mo: '</section><section>%s<title>%s</title>' % (mo.group('anchor'), mo.group('strong')), text) return text
number = int(self.number)
try: number = int(self.number) except: return
def numberTranslate(self): hundredsNumber = 0 thousandsNumber = 0 hundredsString = "" thousandsString = "" resultString = ""
print "library.catalog:CatalogBuilder.generateSortTitle(): translating '%s'" % word
def generateSortTitle(self, title): # Convert the actual title to a string suitable for sorting. # Convert numbers to strings, ignore leading stop words # The 21-Day Consciousness Cleanse
from calibre.ebooks.oeb.base import XLINK_NS
from calibre.ebooks.oeb.base import XLINK_NS, XHTML_NS
def convert(self, stream, options, file_ext, log, accelerators): from calibre.ebooks.metadata.opf2 import OPFCreator from calibre.ebooks.metadata.meta import get_metadata from calibre.ebooks.oeb.base import XLINK_NS NAMESPACES = {'f':FB2NS, 'l':XLINK_NS} log.debug('Parsing XML...') raw = stream.read() try: doc = etree.fromstring(raw.replace('\0', '')) except etree.XMLSyntaxError: doc = etree.fromstring(raw.replace('& ', '&amp;')) self.extract_embedded_content(doc) log.debug('Converting XML to HTML...') ss = open(P('templates/fb2.xsl'), 'rb').read() if options.no_inline_fb2_toc: log('Disabling generation of inline FB2 TOC') ss = re.compile(r'<!-- BUILD TOC -->.*<!-- END BUILD TOC -->', re.DOTALL).sub('', ss)
open('index.xhtml', 'wb').write(transform.tostring(result))
index = transform.tostring(result) open('index.xhtml', 'wb').write(index) open('inline-styles.css', 'wb').write(css)
def convert(self, stream, options, file_ext, log, accelerators): from calibre.ebooks.metadata.opf2 import OPFCreator from calibre.ebooks.metadata.meta import get_metadata from calibre.ebooks.oeb.base import XLINK_NS NAMESPACES = {'f':FB2NS, 'l':XLINK_NS} log.debug('Parsing XML...') raw = stream.read() try: doc = etree.fromstring(raw.replace('\0', '')) except etree.XMLSyntaxError: doc = etree.fromstring(raw.replace('& ', '&amp;')) self.extract_embedded_content(doc) log.debug('Converting XML to HTML...') ss = open(P('templates/fb2.xsl'), 'rb').read() if options.no_inline_fb2_toc: log('Disabling generation of inline FB2 TOC') ss = re.compile(r'<!-- BUILD TOC -->.*<!-- END BUILD TOC -->', re.DOTALL).sub('', ss)
if os.path.isabs(member.filename): targetpath = os.path.join(targetpath, member.filename[1:]) else: targetpath = os.path.join(targetpath, member.filename)
fname = decode_arcname(member.filename) if fname.startswith('/'): fname = fname[1:] targetpath = os.path.join(targetpath, fname)
def _extract_member(self, member, targetpath, pwd): """Extract the ZipInfo object 'member' to a physical file on the path targetpath. """ # build the destination pathname, replacing # forward slashes to platform specific separators. if targetpath[-1:] == "/": targetpath = targetpath[:-1]
if not isinstance(targetpath, unicode): encoding = detect(targetpath)['encoding'] try: targetpath = targetpath.decode(encoding) except: targetpath = targetpath.decode('utf-8', 'replace') targetpath = targetpath.encode(filesystem_encoding)
def _extract_member(self, member, targetpath, pwd): """Extract the ZipInfo object 'member' to a physical file on the path targetpath. """ # build the destination pathname, replacing # forward slashes to platform specific separators. if targetpath[-1:] == "/": targetpath = targetpath[:-1]
source = self.open(member, pwd=pwd)
def _extract_member(self, member, targetpath, pwd): """Extract the ZipInfo object 'member' to a physical file on the path targetpath. """ # build the destination pathname, replacing # forward slashes to platform specific separators. if targetpath[-1:] == "/": targetpath = targetpath[:-1]
try: target = open(targetpath, "wb") except IOError: targetpath = sanitize_file_name(targetpath) target = open(targetpath, "wb") shutil.copyfileobj(source, target) source.close() target.close()
with closing(self.open(member, pwd=pwd)) as source: with open(targetpath, 'wb') as target: shutil.copyfileobj(source, target)
def _extract_member(self, member, targetpath, pwd): """Extract the ZipInfo object 'member' to a physical file on the path targetpath. """ # build the destination pathname, replacing # forward slashes to platform specific separators. if targetpath[-1:] == "/": targetpath = targetpath[:-1]
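The rewritten extraction body uses contextlib.closing so the member stream is closed even when the copy raises, replacing the manual open/copy/close sequence (and its sanitize_file_name retry) shown above. The shape of the pattern, stripped of the zip specifics; copy_stream is a made-up name:

    import shutil
    from contextlib import closing
    from cStringIO import StringIO

    def copy_stream(opener, targetpath):
        # closing() turns any object with a close() method into a context
        # manager, so both handles are released even if copyfileobj raises.
        with closing(opener()) as source:
            with open(targetpath, 'wb') as target:
                shutil.copyfileobj(source, target)

    copy_stream(lambda: StringIO('payload'), 'out.bin')  # writes 7 bytes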
z = ZipFile(zipstream, 'w') path = os.path.join(tdir, *name.split('/')) shutil.copyfileobj(datastream, open(path, 'wb')) for info in names: current = os.path.join(tdir, *info.filename.split('/')) if os.path.isdir(current): z.writestr(info.filename+'/', '', 0700) else: z.write(current, info.filename, compress_type=info.compress_type) z.close()
with closing(ZipFile(zipstream, 'w')) as z: for info in names: fname = decode_arcname(info.filename) current = os.path.join(tdir, *fname.split('/')) if os.path.isdir(current): z.writestr(info.filename+'/', '', 0700) else: z.write(current, info.filename, compress_type=info.compress_type)
def safe_replace(zipstream, name, datastream): ''' Replace a file in a zip file in a safe manner. This proceeds by extracting and re-creating the zipfile. This is neccessary because :method:`ZipFile.replace` sometimes created corrupted zip files. :param zipstream: Stream from a zip file :param name: The name of the file to replace :param datastream: The data to replace the file with. ''' z = ZipFile(zipstream, 'r') names = z.infolist() with TemporaryDirectory('_zipfile_replace') as tdir: z.extractall(path=tdir) zipstream.seek(0) zipstream.truncate() z = ZipFile(zipstream, 'w') path = os.path.join(tdir, *name.split('/')) shutil.copyfileobj(datastream, open(path, 'wb')) for info in names: current = os.path.join(tdir, *info.filename.split('/')) if os.path.isdir(current): z.writestr(info.filename+'/', '', 0700) else: z.write(current, info.filename, compress_type=info.compress_type) z.close()
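Python's zipfile has no reliable in-place replace, so safe_replace rebuilds the archive: extract everything, overwrite the one member on disk, then rewrite the zip from the extracted tree; the added cell also wraps the writer in closing() and decodes member names before touching the filesystem. A condensed, standard-library-only sketch of the rebuild under those assumptions; replace_member is a made-up name, not calibre's API:

    import os, shutil, tempfile
    from contextlib import closing
    from zipfile import ZipFile

    def replace_member(zippath, name, datastream):
        tdir = tempfile.mkdtemp()
        try:
            with closing(ZipFile(zippath, 'r')) as z:
                infos = z.infolist()
                z.extractall(tdir)
            # Overwrite the target member in the extracted tree
            with open(os.path.join(tdir, *name.split('/')), 'wb') as f:
                shutil.copyfileobj(datastream, f)
            # Rebuild the archive, preserving per-member compression
            with closing(ZipFile(zippath, 'w')) as z:
                for info in infos:
                    current = os.path.join(tdir, *info.filename.split('/'))
                    if os.path.isdir(current):
                        z.writestr(info.filename.rstrip('/') + '/', '')
                    else:
                        z.write(current, info.filename,
                                compress_type=info.compress_type)
        finally:
            shutil.rmtree(tdir)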