rem (string, 0–322k chars) | add (string, 0–2.05M chars) | context (string, 8–228k chars) |
---|---|---|
max_results=10, rlang='all'):
|
max_results=20, rlang='all'):
|
def __init__(self, title=None, author=None, publisher=None, isbn=None, keywords=None, max_results=10, rlang='all'): assert not(title is None and author is None and publisher is None \ and isbn is None and keywords is None) assert (max_results < 11)
|
assert (max_results < 11)
|
assert (max_results < 21)
|
def __init__(self, title=None, author=None, publisher=None, isbn=None, keywords=None, max_results=10, rlang='all'): assert not(title is None and author is None and publisher is None \ and isbn is None and keywords is None) assert (max_results < 11)
|
def populate(self, entries, br, verbose=False): res = [] for x in entries: entry = self.get_individual_metadata(x, br, verbose)
|
def fetchdatathread(self, qbr, qsync, nb, url, verbose): try: browser = qbr.get(True) entry = self.get_individual_metadata(url, browser, verbose) except: report(verbose) entry = None finally: qbr.put(browser, True) qsync.put(nb, True) return entry def producer(self, sync, urls, br, verbose=False): for i in xrange(len(urls)): thread = ThreadwithResults(self.fetchdatathread, br, sync, i, urls[i], verbose) thread.start() self.thread.append(thread) def consumer(self, sync, syncbis, br, total_entries, verbose=False): i=0 while i < total_entries: nb = int(sync.get(True)) self.thread[nb].join() entry = self.thread[nb].get_result() i+=1
|
def populate(self, entries, br, verbose=False): res = [] for x in entries: entry = self.get_individual_metadata(x, br, verbose) if entry is not None: mi = self.fill_MI(entry, verbose) if mi is not None: mi.tags, atag = self.get_tags(entry, verbose) if atag: tags = self.get_individual_metadata(mi.tags, br, verbose) if tags is not None: mi.tags = self.get_tags(tags, verbose)[0] res.append(mi) return res
|
tags = self.get_individual_metadata(mi.tags, br, verbose) if tags is not None: mi.tags = self.get_tags(tags, verbose)[0] res.append(mi) return res
|
threadbis = ThreadwithResults(self.fetchdatathread, br, syncbis, nb, mi.tags, verbose) self.thread[nb] = threadbis self.nbtag +=1 threadbis.start() def populate(self, entries, ibr, verbose=False, brcall=3): br = Queue(brcall) cbr = Queue(brcall-1) syncp = Queue(1) syncc = Queue(len(entries)) for i in xrange(brcall-1): br.put(browser(), True) cbr.put(browser(), True) br.put(ibr, True) self.res = [None]*len(entries) prod_thread = Thread(target=self.producer, args=(syncp, entries, br, verbose)) cons_thread = Thread(target=self.consumer, args=(syncp, syncc, cbr, len(entries), verbose)) prod_thread.start() cons_thread.start() prod_thread.join() cons_thread.join() for i in xrange(self.nbtag): nb = int(syncc.get(True)) tags = self.thread[nb].get_result() if tags is not None: self.res[nb].tags = self.get_tags(tags, verbose)[0] return self.res
|
def populate(self, entries, br, verbose=False): res = [] for x in entries: entry = self.get_individual_metadata(x, br, verbose) if entry is not None: mi = self.fill_MI(entry, verbose) if mi is not None: mi.tags, atag = self.get_tags(entry, verbose) if atag: tags = self.get_individual_metadata(mi.tags, br, verbose) if tags is not None: mi.tags = self.get_tags(tags, verbose)[0] res.append(mi) return res
|
ISBN, publisher or keywords. Will fetch a maximum of 10 matches,
|
ISBN, publisher or keywords. Will fetch a maximum of 20 matches,
|
def option_parser(): parser = OptionParser(textwrap.dedent(\ _('''\ %prog [options] Fetch book metadata from Amazon. You must specify one of title, author, ISBN, publisher or keywords. Will fetch a maximum of 10 matches, so you should make your query as specific as possible. You can chose the language for metadata retrieval: english & french & german ''' ))) parser.add_option('-t', '--title', help=_('Book title')) parser.add_option('-a', '--author', help=_('Book author(s)')) parser.add_option('-p', '--publisher', help=_('Book publisher')) parser.add_option('-i', '--isbn', help=_('Book ISBN')) parser.add_option('-k', '--keywords', help=_('Keywords')) parser.add_option('-s', '--social', default=0, action='count', help=_('Get social data only')) parser.add_option('-m', '--max-results', default=10, help=_('Maximum number of results to fetch')) parser.add_option('-l', '--lang', default='all', help=_('Chosen language for metadata search (en, fr, de)')) parser.add_option('-v', '--verbose', default=0, action='count', help=_('Be more verbose about errors')) return parser
|
def setup_printer(opts, for_comic=False):
|
def get_pdf_printer(opts, for_comic=False):
|
def setup_printer(opts, for_comic=False): from calibre.gui2 import is_ok_to_use_qt if not is_ok_to_use_qt(): raise Exception('Not OK to use Qt') printer = get_pdf_printer() custom_size = get_custom_size(opts) if opts.output_profile.short_name == 'default': if custom_size is None: printer.setPaperSize(paper_size(opts.paper_size)) else: printer.setPaperSize(QSizeF(custom_size[0], custom_size[1]), unit(opts.unit)) else: w = opts.output_profile.comic_screen_size[0] if for_comic else \ opts.output_profile.width h = opts.output_profile.comic_screen_size[1] if for_comic else \ opts.output_profile.height dpi = opts.output_profile.dpi printer.setPaperSize(QSizeF(float(w) / dpi, float(h)/dpi), QPrinter.Inch) printer.setPageMargins(0, 0, 0, 0, QPrinter.Point) printer.setOrientation(orientation(opts.orientation)) printer.setOutputFormat(QPrinter.PdfFormat) return printer
|
printer = get_pdf_printer()
|
printer = QPrinter(QPrinter.HighResolution)
|
def setup_printer(opts, for_comic=False): from calibre.gui2 import is_ok_to_use_qt if not is_ok_to_use_qt(): raise Exception('Not OK to use Qt') printer = get_pdf_printer() custom_size = get_custom_size(opts) if opts.output_profile.short_name == 'default': if custom_size is None: printer.setPaperSize(paper_size(opts.paper_size)) else: printer.setPaperSize(QSizeF(custom_size[0], custom_size[1]), unit(opts.unit)) else: w = opts.output_profile.comic_screen_size[0] if for_comic else \ opts.output_profile.width h = opts.output_profile.comic_screen_size[1] if for_comic else \ opts.output_profile.height dpi = opts.output_profile.dpi printer.setPaperSize(QSizeF(float(w) / dpi, float(h)/dpi), QPrinter.Inch) printer.setPageMargins(0, 0, 0, 0, QPrinter.Point) printer.setOrientation(orientation(opts.orientation)) printer.setOutputFormat(QPrinter.PdfFormat) return printer
|
printer.setPageMargins(0, 0, 0, 0, QPrinter.Point)
|
printer.setPageMargins(opts.margin_left, opts.margin_top, opts.margin_right, opts.margin_bottom, QPrinter.Point)
|
def setup_printer(opts, for_comic=False): from calibre.gui2 import is_ok_to_use_qt if not is_ok_to_use_qt(): raise Exception('Not OK to use Qt') printer = get_pdf_printer() custom_size = get_custom_size(opts) if opts.output_profile.short_name == 'default': if custom_size is None: printer.setPaperSize(paper_size(opts.paper_size)) else: printer.setPaperSize(QSizeF(custom_size[0], custom_size[1]), unit(opts.unit)) else: w = opts.output_profile.comic_screen_size[0] if for_comic else \ opts.output_profile.width h = opts.output_profile.comic_screen_size[1] if for_comic else \ opts.output_profile.height dpi = opts.output_profile.dpi printer.setPaperSize(QSizeF(float(w) / dpi, float(h)/dpi), QPrinter.Inch) printer.setPageMargins(0, 0, 0, 0, QPrinter.Point) printer.setOrientation(orientation(opts.orientation)) printer.setOutputFormat(QPrinter.PdfFormat) return printer
|
printer = setup_printer(opts, for_comic=for_comic)
|
printer = get_pdf_printer(opts, for_comic=for_comic)
|
def get_printer_page_size(opts, for_comic=False): printer = setup_printer(opts, for_comic=for_comic) size = printer.paperSize(QPrinter.Millimeter) return size.width() / 10., size.height() / 10.
|
def get_printer(self, set_horz_margins=False): printer = get_pdf_printer() printer.setPaperSize(QSizeF(self.size[0] * 10, self.size[1] * 10), QPrinter.Millimeter) if set_horz_margins: printer.setPageMargins(0., self.opts.margin_top, 0., self.opts.margin_bottom, QPrinter.Point) else: printer.setPageMargins(0, 0, 0, 0, QPrinter.Point) printer.setOrientation(orientation(self.opts.orientation)) printer.setOutputFormat(QPrinter.PdfFormat) printer.setFullPage(not set_horz_margins) return printer
|
def get_printer(self, set_horz_margins=False): printer = get_pdf_printer() printer.setPaperSize(QSizeF(self.size[0] * 10, self.size[1] * 10), QPrinter.Millimeter) if set_horz_margins: printer.setPageMargins(0., self.opts.margin_top, 0., self.opts.margin_bottom, QPrinter.Point) else: printer.setPageMargins(0, 0, 0, 0, QPrinter.Point) printer.setOrientation(orientation(self.opts.orientation)) printer.setOutputFormat(QPrinter.PdfFormat) printer.setFullPage(not set_horz_margins) return printer
|
|
self.logger.debug('\tRendering item %s as %i' % (os.path.basename(str(self.view.url().toLocalFile())), len(self.combine_queue))) printer = self.get_printer(set_horz_margins=True)
|
self.logger.debug('\tRendering item %s as %i.pdf' % (os.path.basename(str(self.view.url().toLocalFile())), len(self.combine_queue))) printer = get_pdf_printer(self.opts)
|
def _render_html(self, ok): if ok: item_path = os.path.join(self.tmp_path, '%i.pdf' % len(self.combine_queue)) self.logger.debug('\tRendering item %s as %i' % (os.path.basename(str(self.view.url().toLocalFile())), len(self.combine_queue))) printer = self.get_printer(set_horz_margins=True) printer.setOutputFileName(item_path) self.view.print_(printer) self._render_book()
|
printer = get_pdf_printer() printer.setPaperSize(QSizeF(self.size[0] * 10, self.size[1] * 10), QPrinter.Millimeter) printer.setPageMargins(0, 0, 0, 0, QPrinter.Point) printer.setOrientation(orientation(self.opts.orientation)) printer.setOutputFormat(QPrinter.PdfFormat)
|
printer = get_pdf_printer(self.opts)
|
def render_images(self, outpath, mi, items): printer = get_pdf_printer() printer.setPaperSize(QSizeF(self.size[0] * 10, self.size[1] * 10), QPrinter.Millimeter) printer.setPageMargins(0, 0, 0, 0, QPrinter.Point) printer.setOrientation(orientation(self.opts.orientation)) printer.setOutputFormat(QPrinter.PdfFormat) printer.setOutputFileName(outpath) printer.setDocName(mi.title) printer.setCreator(u'%s [%s]'%(__appname__, __version__)) # Seems to be no way to set author printer.setFullPage(True)
|
printer.setFullPage(True)
|
def render_images(self, outpath, mi, items): printer = get_pdf_printer() printer.setPaperSize(QSizeF(self.size[0] * 10, self.size[1] * 10), QPrinter.Millimeter) printer.setPageMargins(0, 0, 0, 0, QPrinter.Point) printer.setOrientation(orientation(self.opts.orientation)) printer.setOutputFormat(QPrinter.PdfFormat) printer.setOutputFileName(outpath) printer.setDocName(mi.title) printer.setCreator(u'%s [%s]'%(__appname__, __version__)) # Seems to be no way to set author printer.setFullPage(True)
|
|
try: article = self.feed_objects[f].articles[a] except: self.log.exception('Failed to get article object for postprocessing') pass else: self.populate_article_metadata(article, ans, first_fetch)
|
if job_info: url, f, a, feed_len = job_info try: article = self.feed_objects[f].articles[a] except: self.log.exception('Failed to get article object for postprocessing') pass else: self.populate_article_metadata(article, ans, first_fetch)
|
def _postprocess_html(self, soup, first_fetch, job_info): if self.no_stylesheets: for link in list(soup.findAll('link', type=re.compile('css')))+list(soup.findAll('style')): link.extract() head = soup.find('head') if not head: head = soup.find('body') if not head: head = soup.find(True) style = BeautifulSoup(u'<style type="text/css" title="override_css">%s</style>'%(self.template_css +'\n\n'+(self.extra_css if self.extra_css else ''))).find('style') head.insert(len(head.contents), style) if first_fetch and job_info: url, f, a, feed_len = job_info body = soup.find('body') if body is not None: templ = self.navbar.generate(False, f, a, feed_len, not self.has_single_feed, url, __appname__, center=self.center_navbar, extra_css=self.extra_css) elem = BeautifulSoup(templ.render(doctype='xhtml').decode('utf-8')).find('div') body.insert(0, elem) if self.remove_javascript: for script in list(soup.findAll('script')): script.extract() for o in soup.findAll(onload=True): del o['onload']
|
if barename(child.tag) == 'body':
|
if isinstance(child.tag, (unicode, str)) and barename(child.tag) == 'body':
|
def first_pass(data): try: data = etree.fromstring(data, parser=parser) except etree.XMLSyntaxError, err: self.oeb.log.exception('Initial parse failed:') repl = lambda m: ENTITYDEFS.get(m.group(1), m.group(0)) data = ENTITY_RE.sub(repl, data) try: data = etree.fromstring(data, parser=parser) except etree.XMLSyntaxError, err: self.oeb.logger.warn('Parsing file %r as HTML' % self.href) if err.args and err.args[0].startswith('Excessive depth'): from lxml.html import soupparser data = soupparser.fromstring(data) else: data = html.fromstring(data) data.attrib.pop('xmlns', None) for elem in data.iter(tag=etree.Comment): if elem.text: elem.text = elem.text.strip('-') data = etree.tostring(data, encoding=unicode) try: data = etree.fromstring(data, parser=parser) except etree.XMLSyntaxError: data = etree.fromstring(data, parser=RECOVER_PARSER) return data
|
elem.getparent().remove(elem)
|
if hasattr(elem, 'getparent'): elem.getparent().remove(elem) else: elem = SPAN(elem)
|
def _generate(self, article, style=None, extra_css=None): content = article.content if article.content else '' summary = article.summary if article.summary else '' text = content if len(content) > len(summary) else summary head = HEAD(TITLE(article.title)) if style: head.append(STYLE(style, type='text/css')) if extra_css: head.append(STYLE(extra_css, type='text/css'))
|
aTag.insert(0, escape(title['author']))
|
aTag.insert(0, title['author'])
|
def generateHTMLDescriptions(self): # Write each title to a separate HTML file in contentdir if self.verbose: print self.updateProgressFullStep("generateHTMLDescriptions()")
|
etree.SubElement(trees[subitem].find(".//body"), "img").text = line[len(CALIBRE_SNB_IMG_TAG):]
|
prefix = ProcessFileName(os.path.dirname(self.item.href)) if prefix != '': etree.SubElement(trees[subitem].find(".//body"), "img").text = \ prefix + '_' + line[len(CALIBRE_SNB_IMG_TAG):] else: etree.SubElement(trees[subitem].find(".//body"), "img").text = \ line[len(CALIBRE_SNB_IMG_TAG):]
|
def mlize(self): output = [ u'' ] stylizer = Stylizer(self.item.data, self.item.href, self.oeb_book, self.opts, self.opts.output_profile) content = unicode(etree.tostring(self.item.data.find(XHTML('body')), encoding=unicode)) content = self.remove_newlines(content) trees = { } for subitem, subtitle in self.subitems: snbcTree = etree.Element("snbc") etree.SubElement(etree.SubElement(snbcTree, "head"), "title").text = subtitle etree.SubElement(snbcTree, "body") trees[subitem] = snbcTree output.append(u'%s%s\n\n' % (CALIBRE_SNB_BM_TAG, "")) output += self.dump_text(self.subitems, etree.fromstring(content), stylizer) output = self.cleanup_text(u''.join(output))
|
return
|
id_ = elem.get('id', None) if id_: elem.clear() elem.text = None elem.set('id', id_) else: return
|
def mobimlize_elem(self, elem, stylizer, bstate, istates, ignore_valign=False): if not isinstance(elem.tag, basestring) \ or namespace(elem.tag) != XHTML_NS: return style = stylizer.style(elem) # <mbp:frame-set/> does not exist lalalala if style['display'] in ('none', 'oeb-page-head', 'oeb-page-foot') \ or style['visibility'] == 'hidden': return tag = barename(elem.tag) istate = copy.copy(istates[-1]) istate.rendered = False istate.list_num = 0 istates.append(istate) left = 0 display = style['display'] isblock = not display.startswith('inline') isblock = isblock and style['float'] == 'none' isblock = isblock and tag != 'br' if isblock: bstate.para = None istate.halign = style['text-align'] istate.indent = style['text-indent'] if style['margin-left'] == 'auto' \ and style['margin-right'] == 'auto': istate.halign = 'center' margin = asfloat(style['margin-left']) padding = asfloat(style['padding-left']) if tag != 'body': left = margin + padding istate.left += left vmargin = asfloat(style['margin-top']) bstate.vmargin = max((bstate.vmargin, vmargin)) vpadding = asfloat(style['padding-top']) if vpadding > 0: bstate.vpadding += bstate.vmargin bstate.vmargin = 0 bstate.vpadding += vpadding elif not istate.href: margin = asfloat(style['margin-left']) padding = asfloat(style['padding-left']) lspace = margin + padding if lspace > 0: spaces = int(round((lspace * 3) / style['font-size'])) elem.text = (u'\xa0' * spaces) + (elem.text or '') margin = asfloat(style['margin-right']) padding = asfloat(style['padding-right']) rspace = margin + padding if rspace > 0: spaces = int(round((rspace * 3) / style['font-size'])) if len(elem) == 0: elem.text = (elem.text or '') + (u'\xa0' * spaces) else: last = elem[-1] last.text = (last.text or '') + (u'\xa0' * spaces) if bstate.content and style['page-break-before'] in PAGE_BREAKS: bstate.pbreak = True istate.fsize = self.mobimlize_font(style['font-size']) istate.italic = True if style['font-style'] == 'italic' else False weight = style['font-weight'] istate.bold = weight in ('bold', 'bolder') or asfloat(weight) > 400 istate.preserve = (style['white-space'] in ('pre', 'pre-wrap')) istate.bgcolor = style['background-color'] istate.fgcolor = style['color'] istate.strikethrough = style['text-decoration'] == 'line-through' istate.underline = style['text-decoration'] == 'underline' if 'monospace' in style['font-family']: istate.family = 'monospace' elif 'sans-serif' in style['font-family']: istate.family = 'sans-serif' else: istate.family = 'serif' if 'id' in elem.attrib: istate.ids.add(elem.attrib['id']) if 'name' in elem.attrib: istate.ids.add(elem.attrib['name']) if tag == 'a' and 'href' in elem.attrib: istate.href = elem.attrib['href'] istate.attrib.clear() if tag == 'img' and 'src' in elem.attrib: istate.attrib['src'] = elem.attrib['src'] istate.attrib['align'] = 'baseline' for prop in ('width', 'height'): if style[prop] != 'auto': value = style[prop] if value == getattr(self.profile, prop): result = '100%' else: try: ems = int(round(float(value) / self.profile.fbase)) except: continue result = "%dem" % ems istate.attrib[prop] = result elif tag == 'hr' and asfloat(style['width']) > 0: prop = style['width'] / self.profile.width istate.attrib['width'] = "%d%%" % int(round(prop * 100)) elif display == 'table': tag = 'table' elif display == 'table-row': tag = 'tr' elif display == 'table-cell': tag = 'td' if tag in TABLE_TAGS and self.ignore_tables: tag = 'span' if tag == 'td' else 'div'
|
date = self.check_timestamp(record, book, path) if date is not None: self.update_text_record(record, book, date, path, i)
|
self.update_text_record(record, book, path, i)
|
def update(self, booklists, collections_attributes): debug_print('Starting update', collections_attributes) for i, booklist in booklists.items(): playlist_map = self.build_id_playlist_map(i) debug_print('Updating XML Cache:', i) root = self.record_roots[i] lpath_map = self.build_lpath_map(root) for book in booklist: path = os.path.join(self.prefixes[i], *(book.lpath.split('/'))) record = lpath_map.get(book.lpath, None) if record is None: record = self.create_text_record(root, i, book.lpath) date = self.check_timestamp(record, book, path) if date is not None: self.update_text_record(record, book, date, path, i) # Ensure the collections in the XML database are recorded for # this book if book.device_collections is None: book.device_collections = [] book.device_collections = playlist_map.get(book.lpath, []) self.update_playlists(i, root, booklist, collections_attributes) # Update the device collections because update playlist could have added # some new ones. debug_print('In update/ Starting refresh of device_collections') for i, booklist in booklists.items(): playlist_map = self.build_id_playlist_map(i) for book in booklist: book.device_collections = playlist_map.get(book.lpath, []) self.fix_ids() debug_print('Finished update')
|
def check_timestamp(self, record, book, path):
|
def update_text_record(self, record, book, path, bl_index):
|
def check_timestamp(self, record, book, path): ''' Checks the timestamp in the Sony DB against the file. If different, return the file timestamp. Otherwise return None. ''' timestamp = os.path.getmtime(path) date = strftime(timestamp) if date != record.get('date', None): return date return None
|
Checks the timestamp in the Sony DB against the file. If different, return the file timestamp. Otherwise return None.
|
Update the Sony database from the book. This is done if the timestamp in the db differs from the timestamp on the file.
|
def check_timestamp(self, record, book, path): ''' Checks the timestamp in the Sony DB against the file. If different, return the file timestamp. Otherwise return None. ''' timestamp = os.path.getmtime(path) date = strftime(timestamp) if date != record.get('date', None): return date return None
|
return date return None def update_text_record(self, record, book, date, path, bl_index): ''' Update the Sony database from the book. This is done if the timestamp in the db differs from the timestamp on the file. ''' record.set('date', date)
|
record.set('date', date)
|
def check_timestamp(self, record, book, path): ''' Checks the timestamp in the Sony DB against the file. If different, return the file timestamp. Otherwise return None. ''' timestamp = os.path.getmtime(path) date = strftime(timestamp) if date != record.get('date', None): return date return None
|
self.library.layout().addWidget(self.cover_flow)
|
self.cb_layout.addWidget(self.cover_flow)
|
def initialize(self, library_path, db, listener, actions):
    opts = self.opts
    self.last_time = datetime.datetime.now()
    self.preferences_action, self.quit_action = actions
    self.library_path = library_path
    self.spare_servers = []
    self.must_restart_before_config = False
    # Initialize fontconfig in a separate thread as this can be a lengthy
    # process if run for the first time on this machine
    from calibre.utils.fonts import fontconfig
    self.fc = fontconfig
    self.listener = Listener(listener)
    self.check_messages_timer = QTimer()
    self.connect(self.check_messages_timer, SIGNAL('timeout()'), self.another_instance_wants_to_talk)
    self.check_messages_timer.start(1000)
|
self.library_view.verticalHeader().sectionClicked.connect(self.view_specific_book)
|
def initialize(self, library_path, db, listener, actions):
    opts = self.opts
    self.last_time = datetime.datetime.now()
    self.preferences_action, self.quit_action = actions
    self.library_path = library_path
    self.spare_servers = []
    self.must_restart_before_config = False
    # Initialize fontconfig in a separate thread as this can be a lengthy
    # process if run for the first time on this machine
    from calibre.utils.fonts import fontconfig
    self.fc = fontconfig
    self.listener = Listener(listener)
    self.check_messages_timer = QTimer()
    self.connect(self.check_messages_timer, SIGNAL('timeout()'), self.another_instance_wants_to_talk)
    self.check_messages_timer.start(1000)
|
|
if not is_gui_thread(): return Dispatcher(self.recount)(*args)
|
def recount(self, *args): if not is_gui_thread(): # Re-call in GUI thread return Dispatcher(self.recount)(*args) ci = self.currentIndex() if not ci.isValid(): ci = self.indexAt(QPoint(10, 10)) try: self.model().refresh() except: #Database connection could be closed if an integrity check is happening pass if ci.isValid(): self.scrollTo(ci, QTreeView.PositionAtTop)
|
|
if ci.isValid(): self.scrollTo(ci, QTreeView.PositionAtTop)
|
if path: idx = self.model().index_for_path(path) if idx.isValid(): self.setCurrentIndex(idx) self.scrollTo(idx, QTreeView.PositionAtCenter)
|
def recount(self, *args): if not is_gui_thread(): # Re-call in GUI thread return Dispatcher(self.recount)(*args) ci = self.currentIndex() if not ci.isValid(): ci = self.indexAt(QPoint(10, 10)) try: self.model().refresh() except: #Database connection could be closed if an integrity check is happening pass if ci.isValid(): self.scrollTo(ci, QTreeView.PositionAtTop)
|
translated.append(word)
|
translated.append(word.capitalize())
|
def generateSortTitle(self, title):
    # Convert the actual title to a string suitable for sorting.
    # Ignore leading stop words
    # Optionally convert leading numbers to strings
    from calibre.ebooks.metadata import title_sort
    # Strip stop words
    title_words = title_sort(title).split()
    translated = []
|
target = open(targetpath, "wb")
|
try: target = open(targetpath, "wb") except IOError: targetpath = sanitize_file_name(targetpath) target = open(targetpath, "wb")
|
def _extract_member(self, member, targetpath, pwd):
    """Extract the ZipInfo object 'member' to a physical file on the path
    targetpath.
    """
    # build the destination pathname, replacing
    # forward slashes to platform specific separators.
    if targetpath[-1:] == "/":
        targetpath = targetpath[:-1]
|
abspath = os.path.abspath(*file_name.split('/'))
|
abspath = os.path.abspath(os.path.join(*file_name.split('/')))
|
def is_match(fname): return (name is not None and fname == name) or \ (match is not None and match.search(fname) is not None)
|
massage.append((re.compile(r'&(\S+?);'), lambda match: entity_to_unicode(match, encoding=self.encoding)))
|
enc = 'cp1252' if callable(self.encoding) or self.encoding is None else self.encoding massage.append((re.compile(r'&(\S+?);'), lambda match: entity_to_unicode(match, encoding=enc)))
|
def index_to_soup(self, url_or_raw, raw=False): ''' Convenience method that takes an URL to the index page and returns a `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/documentation.html>`_ of it.
|
try: self.parent.contents.remove(self) except ValueError: pass
|
idx = None for i, x in enumerate(self.parent.contents): if x is self: idx = i break if idx is not None: self.parent.contents.pop(idx)
|
def extract(self): """Destructively rips this element out of the tree.""" if self.parent: try: self.parent.contents.remove(self) except ValueError: pass
|
from PyQt4.Qt import QFile, QImage, Qt
|
def setup_desktop_integration(self): try: from PyQt4.Qt import QFile, QImage, Qt
|
|
render_svg(QFile(I('mimetypes/lrf.svg')), 'calibre-lrf.png')
|
render_img('mimetypes/lrf.svg', 'calibre-lrf.png')
|
def setup_desktop_integration(self): try: from PyQt4.Qt import QFile, QImage, Qt
|
p = QImage(I('lt.png')).scaledToHeight(128, Qt.SmoothTransformation) p.save('calibre-gui.png')
|
render_img('lt.png', 'calibre-gui.png')
|
def setup_desktop_integration(self): try: from PyQt4.Qt import QFile, QImage, Qt
|
render_svg(QFile(I('viewer.svg')), 'calibre-viewer.png')
|
render_img('viewer.svg', 'calibre-viewer.png')
|
def setup_desktop_integration(self): try: from PyQt4.Qt import QFile, QImage, Qt
|
def render_svg(image, dest, width=128, height=128): from PyQt4.QtGui import QPainter, QImage from PyQt4.QtSvg import QSvgRenderer image = image.readAll() if hasattr(image, 'readAll') else image svg = QSvgRenderer(image) painter = QPainter() image = QImage(width, height, QImage.Format_ARGB32) painter.begin(image) painter.setRenderHints(QPainter.Antialiasing|QPainter.TextAntialiasing|QPainter.SmoothPixmapTransform|QPainter.HighQualityAntialiasing) painter.setCompositionMode(QPainter.CompositionMode_SourceOver) svg.render(painter) painter.end() if dest is None: return image image.save(dest)
|
def render_img(image, dest, width=128, height=128): from PyQt4.Qt import QImage, Qt img = QImage(I(image)).scaled(width, height, Qt.IgnoreAspectRatio, Qt.SmoothTransformation) img.save(dest)
|
def render_svg(image, dest, width=128, height=128): from PyQt4.QtGui import QPainter, QImage from PyQt4.QtSvg import QSvgRenderer image = image.readAll() if hasattr(image, 'readAll') else image svg = QSvgRenderer(image) painter = QPainter() image = QImage(width, height, QImage.Format_ARGB32) painter.begin(image) painter.setRenderHints(QPainter.Antialiasing|QPainter.TextAntialiasing|QPainter.SmoothPixmapTransform|QPainter.HighQualityAntialiasing) painter.setCompositionMode(QPainter.CompositionMode_SourceOver) svg.render(painter) painter.end() if dest is None: return image image.save(dest)
|
sys.argv = old_argv
|
def main():
    # Handy tab-completers for %cd, %run, import etc.
    # Try commenting this out if you have completion problems/slowness
    import ipy_stock_completers

    # uncomment if you want to get ipython -p sh behaviour
    # without having to use command line switches
    import ipy_profile_sh

    # Configure your favourite editor?
    # Good idea e.g. for %edit os.path.isfile
    import ipy_editors

    # Choose one of these:
    #ipy_editors.scite()
    #ipy_editors.scite('c:/opt/scite/scite.exe')
    #ipy_editors.komodo()
    #ipy_editors.idle()
    # ... or many others, try 'ipy_editors??' after import to see them
    # Or roll your own:
    #ipy_editors.install_editor("c:/opt/jed +$line $file")
    ipy_editors.kate()

    o = ip.options
    # An example on how to set options
    #o.autocall = 1
    o.system_verbose = 0
    o.confirm_exit = 0
|
|
href = item.abshref(val)
|
href = urlnormalize(item.abshref(val))
|
def serialize_elem(self, elem, item, nsrmap=NSRMAP): buffer = self.buffer if not isinstance(elem.tag, basestring) \ or namespace(elem.tag) not in nsrmap: return tag = prefixname(elem.tag, nsrmap) # Previous layers take care of @name id = elem.attrib.pop('id', None) if id is not None: href = '#'.join((item.href, id)) offset = self.anchor_offset or buffer.tell() self.id_offsets[urlnormalize(href)] = offset if self.anchor_offset is not None and \ tag == 'a' and not elem.attrib and \ not len(elem) and not elem.text: return self.anchor_offset = buffer.tell() buffer.write('<') buffer.write(tag) if elem.attrib: for attr, val in elem.attrib.items(): if namespace(attr) not in nsrmap: continue attr = prefixname(attr, nsrmap) buffer.write(' ') if attr == 'href': if self.serialize_href(val, item): continue elif attr == 'src': href = item.abshref(val) if href in self.images: index = self.images[href] buffer.write('recindex="%05d"' % index) continue buffer.write(attr) buffer.write('="') self.serialize_text(val, quot=True) buffer.write('"') if elem.text or len(elem) > 0: buffer.write('>') if elem.text: self.anchor_offset = None self.serialize_text(elem.text) for child in elem: self.serialize_elem(child, item) if child.tail: self.anchor_offset = None self.serialize_text(child.tail) buffer.write('</%s>' % tag) else: buffer.write('/>')
|
def get(name, category, field='name'): ans = self.conn.get('SELECT DISTINCT %s FROM %s'%(field, name)) ans = [x[0].strip() for x in ans] try: ans.remove('') except ValueError: pass categories[category] = list(map(Tag, ans)) tags = categories[category] if name != 'data': for tag in tags: id = self.conn.get('SELECT id FROM %s WHERE %s=?'%(name, field), (tag.name,), all=False) tag.id = id for tag in tags: if tag.id is not None: tag.count = self.conn.get('SELECT COUNT(id) FROM books_%s_link WHERE %s=?'%(name, category), (tag.id,), all=False)
|
for x in ('tags', 'series', 'news', 'publishers', 'authors'): query = 'SELECT id,name,count FROM tag_browser_'+x if sort_on_count: query += ' ORDER BY count DESC'
|
def get(name, category, field='name'): ans = self.conn.get('SELECT DISTINCT %s FROM %s'%(field, name)) ans = [x[0].strip() for x in ans] try: ans.remove('') except ValueError: pass categories[category] = list(map(Tag, ans)) tags = categories[category] if name != 'data': for tag in tags: id = self.conn.get('SELECT id FROM %s WHERE %s=?'%(name, field), (tag.name,), all=False) tag.id = id for tag in tags: if tag.id is not None: tag.count = self.conn.get('SELECT COUNT(id) FROM books_%s_link WHERE %s=?'%(name, category), (tag.id,), all=False) else: for tag in tags: tag.count = self.conn.get('SELECT COUNT(format) FROM data WHERE format=?', (tag.name,), all=False) tags.sort(reverse=sort_on_count, cmp=(lambda x,y:cmp(x.count,y.count)) if sort_on_count else (lambda x,y:cmp(x.name, y.name)))
|
for tag in tags: tag.count = self.conn.get('SELECT COUNT(format) FROM data WHERE format=?', (tag.name,), all=False) tags.sort(reverse=sort_on_count, cmp=(lambda x,y:cmp(x.count,y.count)) if sort_on_count else (lambda x,y:cmp(x.name, y.name))) for x in (('authors', 'author'), ('tags', 'tag'), ('publishers', 'publisher'), ('series', 'series')): get(*x) get('data', 'format', 'format') categories['news'] = [] newspapers = self.conn.get('SELECT name FROM tags WHERE id IN (SELECT DISTINCT tag FROM books_tags_link WHERE book IN (select book from books_tags_link where tag IN (SELECT id FROM tags WHERE name=?)))', (_('News'),)) if newspapers: newspapers = [f[0] for f in newspapers] try: newspapers.remove(_('News')) except ValueError: pass categories['news'] = list(map(Tag, newspapers)) for tag in categories['news']: tag.count = self.conn.get('SELECT COUNT(id) FROM books_tags_link WHERE tag IN (SELECT DISTINCT id FROM tags WHERE name=?)', (tag.name,), all=False)
|
query += ' ORDER BY name ASC' data = self.conn.get(query) category = x if x in ('series', 'news') else x[:-1] categories[category] = [Tag(r[1], count=r[2], id=r[0]) for r in data] categories['format'] = [] for fmt in self.conn.get('SELECT DISTINCT format FROM data'): fmt = fmt[0] count = self.conn.get('SELECT COUNT(id) FROM data WHERE format="%s"'%fmt, all=False) categories['format'].append(Tag(fmt, count=count)) if sort_on_count: categories['format'].sort(cmp=lambda x,y:cmp(x.count, y.count), reverse=True) else: categories['format'].sort(cmp=lambda x,y:cmp(x.name, y.name))
|
def get(name, category, field='name'): ans = self.conn.get('SELECT DISTINCT %s FROM %s'%(field, name)) ans = [x[0].strip() for x in ans] try: ans.remove('') except ValueError: pass categories[category] = list(map(Tag, ans)) tags = categories[category] if name != 'data': for tag in tags: id = self.conn.get('SELECT id FROM %s WHERE %s=?'%(name, field), (tag.name,), all=False) tag.id = id for tag in tags: if tag.id is not None: tag.count = self.conn.get('SELECT COUNT(id) FROM books_%s_link WHERE %s=?'%(name, category), (tag.id,), all=False) else: for tag in tags: tag.count = self.conn.get('SELECT COUNT(format) FROM data WHERE format=?', (tag.name,), all=False) tags.sort(reverse=sort_on_count, cmp=(lambda x,y:cmp(x.count,y.count)) if sort_on_count else (lambda x,y:cmp(x.name, y.name)))
|
re.compile(r'''<meta\s+?[^<>]+?content\s*=\s*['"][^'"]*?charset=([-a-z0-9]+)[^'"]*?['"][^<>]*>''',
|
re.compile(r'''<meta\s+?[^<>]*?content\s*=\s*['"][^'"]*?charset=([-a-z0-9]+)[^'"]*?['"][^<>]*>''',
|
def detect(aBuf): import calibre.ebooks.chardet.universaldetector as universaldetector u = universaldetector.UniversalDetector() u.reset() u.feed(aBuf) u.close() return u.result
|
self.write_rating = False
|
def __init__(self, window, rows, db): QDialog.__init__(self, window) Ui_MetadataBulkDialog.__init__(self) self.setupUi(self) self.db = db self.ids = [ db.id(r) for r in rows] self.write_series = False self.write_rating = False self.changed = False QObject.connect(self.button_box, SIGNAL("accepted()"), self.sync) QObject.connect(self.rating, SIGNAL('valueChanged(int)'), self.rating_changed)
|
|
QObject.connect(self.rating, SIGNAL('valueChanged(int)'), self.rating_changed)
|
def __init__(self, window, rows, db): QDialog.__init__(self, window) Ui_MetadataBulkDialog.__init__(self) self.setupUi(self) self.db = db self.ids = [ db.id(r) for r in rows] self.write_series = False self.write_rating = False self.changed = False QObject.connect(self.button_box, SIGNAL("accepted()"), self.sync) QObject.connect(self.rating, SIGNAL('valueChanged(int)'), self.rating_changed)
|
|
if self.write_rating:
|
if self.rating.value() != -1:
|
def sync(self): for id in self.ids: au = unicode(self.authors.text()) if au: au = string_to_authors(au) self.db.set_authors(id, au, notify=False) if self.auto_author_sort.isChecked(): aut = self.db.authors(id, index_is_id=True) aut = aut if aut else '' aut = [a.strip().replace('|', ',') for a in aut.strip().split(',')] x = authors_to_sort_string(aut) if x: self.db.set_author_sort(id, x, notify=False) aus = unicode(self.author_sort.text()) if aus and self.author_sort.isEnabled(): self.db.set_author_sort(id, aus, notify=False) if self.write_rating: self.db.set_rating(id, 2*self.rating.value(), notify=False) pub = unicode(self.publisher.text()) if pub: self.db.set_publisher(id, pub, notify=False) remove_tags = unicode(self.remove_tags.text()).strip() if remove_tags: remove_tags = [i.strip() for i in remove_tags.split(',')] self.db.unapply_tags(id, remove_tags, notify=False) tags = unicode(self.tags.text()).strip() if tags: tags = map(lambda x: x.strip(), tags.split(',')) self.db.set_tags(id, tags, append=True, notify=False) if self.write_series: self.db.set_series(id, unicode(self.series.currentText()), notify=False)
|
FORMATS = ['epub', 'pdf', 'txt']
|
FORMATS = ['epub', 'pdf', 'htm', 'html', 'txt']
|
def can_handle(cls, dev, debug=False): return dev[-1] == '1.00' and not dev[-2] and not dev[-3]
|
if attr == 'series':
|
if attr == 'series' or getattr(book, 'series', None) == category:
|
def get_collections(self, collection_attributes): from calibre.devices.usbms.driver import debug_print debug_print('Starting get_collections:', prefs['manage_device_metadata']) collections = {} series_categories = set([]) # This map of sets is used to avoid linear searches when testing for # book equality collections_lpaths = {} for book in self: # Make sure we can identify this book via the lpath lpath = getattr(book, 'lpath', None) if lpath is None: continue # Decide how we will build the collections. The default: leave the # book in all existing collections. Do not add any new ones. attrs = ['device_collections'] if getattr(book, '_new_book', False): if prefs['manage_device_metadata'] == 'manual': # Ensure that the book is in all the book's existing # collections plus all metadata collections attrs += collection_attributes else: # For new books, both 'on_send' and 'on_connect' do the same # thing. The book's existing collections are ignored. Put # the book in collections defined by its metadata. attrs = collection_attributes elif prefs['manage_device_metadata'] == 'on_connect': # For existing books, modify the collections only if the user # specified 'on_connect' attrs = collection_attributes for attr in attrs: attr = attr.strip() val = getattr(book, attr, None) if not val: continue if isbytestring(val): val = val.decode(preferred_encoding, 'replace') if isinstance(val, (list, tuple)): val = list(val) elif isinstance(val, unicode): val = [val] for category in val: if attr == 'tags' and len(category) > 1 and \ category[0] == '[' and category[-1] == ']': continue if category not in collections: collections[category] = [] collections_lpaths[category] = set() if lpath not in collections_lpaths[category]: collections_lpaths[category].add(lpath) collections[category].append(book) if attr == 'series': series_categories.add(category) # Sort collections for category, books in collections.items(): def tgetter(x): return getattr(x, 'title_sort', 'zzzz') books.sort(cmp=lambda x,y:cmp(tgetter(x), tgetter(y))) if category in series_categories: # Ensures books are sub sorted by title def getter(x): return getattr(x, 'series_index', sys.maxint) books.sort(cmp=lambda x,y:cmp(getter(x), getter(y))) return collections
|
'style="padding-right:2em">%s</td><td valign="top">%s</td></tr></table>'
|
'style="padding-right:2em; width:40%%">%s</td><td valign="top">%s</td></tr></table>'
|
def color_to_string(col): ans = '#000000' if col.isValid(): col = col.toRgb() if col.isValid(): ans = unicode(col.name()) return ans
|
_('Email to')+' '+account, self)
|
_('Email to')+' '+account)
|
def __init__(self, parent=None): QMenu.__init__(self, parent) self.group = QActionGroup(self) self.actions = [] self._memory = []
|
_('Email to')+' '+account, self)
|
_('Email to')+' '+account+ _(' and delete from library'))
|
def __init__(self, parent=None): QMenu.__init__(self, parent) self.group = QActionGroup(self) self.actions = [] self._memory = []
|
self.connect(action1, SIGNAL('a_s(QAction)'), self.action_triggered) self.connect(action2, SIGNAL('a_s(QAction)'), self.action_triggered)
|
action1.a_s.connect(self.action_triggered) action2.a_s.connect(self.action_triggered)
|
def __init__(self, parent=None): QMenu.__init__(self, parent) self.group = QActionGroup(self) self.actions = [] self._memory = []
|
return self.stanza(search=kwargs.get('search', None), sortby=kwargs.get('sortby',None), authorid=kwargs.get('authorid',None), tagid=kwargs.get('tagid',None), seriesid=kwargs.get('seriesid',None), offset=kwargs.get('offset', 0))
|
return self.opds(version=0)
|
def index(self, **kwargs): 'The / URL' ua = cherrypy.request.headers.get('User-Agent', '').strip() want_opds = \ cherrypy.request.headers.get('Stanza-Device-Name', 919) != 919 or \ cherrypy.request.headers.get('Want-OPDS-Catalog', 919) != 919 or \ ua.startswith('Stanza')
|
'mp3', 'pdb', 'azw', 'azw1'):
|
'mp3', 'pdb', 'azw', 'azw1', 'fb2'):
|
def run(self, archive): is_rar = archive.lower().endswith('.rar') if is_rar: from calibre.libunrar import extract_member, names else: zf = ZipFile(archive, 'r')
|
self.view_menu.addAction(_('View specific format'))
|
ac = self.view_menu.addAction(_('View specific format')) ac.setShortcut(Qt.AltModifier+Qt.Key_V)
|
def __init__(self, listener, opts, actions, parent=None):
    self.preferences_action, self.quit_action = actions
    self.spare_servers = []
    MainWindow.__init__(self, opts, parent)
    # Initialize fontconfig in a separate thread as this can be a lengthy
    # process if run for the first time on this machine
    from calibre.utils.fonts import fontconfig
    self.fc = fontconfig
    self.listener = Listener(listener)
    self.check_messages_timer = QTimer()
    self.connect(self.check_messages_timer, SIGNAL('timeout()'), self.another_instance_wants_to_talk)
    self.check_messages_timer.start(1000)
|
if guess_mimetype(path)[0] == ('text/html'): data = self._reformat(data)
|
def ExtractFiles(self, output_dir=os.getcwdu()): for path in self.Contents(): lpath = os.path.join(output_dir, path) self._ensure_dir(lpath) try: data = self.GetFile(path) except: self.log.exception('Failed to extract %s from CHM, ignoring'%path) continue if lpath.find(';') != -1: # fix file names with ";<junk>" at the end, see _reformat() lpath = lpath.split(';')[0] try: with open(lpath, 'wb') as f: if guess_mimetype(path)[0] == ('text/html'): data = self._reformat(data) f.write(data) except: if iswindows and len(lpath) > 250: self.log.warn('%r filename too long, skipping'%path) continue raise self._extracted = True files = [x for x in os.listdir(output_dir) if os.path.isfile(os.path.join(output_dir, x))] if self.hhc_path not in files: for f in files: if f.lower() == self.hhc_path.lower(): self.hhc_path = f break if self.hhc_path not in files and files: self.hhc_path = files[0]
|
|
def _reformat(self, data):
|
def _reformat(self, data, htmlpath):
|
def _reformat(self, data): try: data = xml_to_unicode(data, strip_encoding_pats=True)[0] soup = BeautifulSoup(data) except ValueError: # hit some strange encoding problems... self.log.exception("Unable to parse html for cleaning, leaving it") return data # nuke javascript... [s.extract() for s in soup('script')] # remove forward and back nav bars from the top/bottom of each page # cos they really fuck with the flow of things and generally waste space # since we can't use [a,b] syntax to select arbitrary items from a list # we'll have to do this manually... # only remove the tables, if they have an image with an alt attribute # containing prev, next or team t = soup('table') if t: if (t[0].previousSibling is None or t[0].previousSibling.previousSibling is None): try: alt = t[0].img['alt'].lower() if alt.find('prev') != -1 or alt.find('next') != -1 or alt.find('team') != -1: t[0].extract() except: pass if (t[-1].nextSibling is None or t[-1].nextSibling.nextSibling is None): try: alt = t[-1].img['alt'].lower() if alt.find('prev') != -1 or alt.find('next') != -1 or alt.find('team') != -1: t[-1].extract() except: pass # for some very odd reason each page's content appears to be in a table # too. and this table has sub-tables for random asides... grr.
|
for img in soup('img'): try: while img['src'].startswith('../'): img['src'] = img['src'][3:] img['src'] = img['src'].split(';')[0] except KeyError: pass
|
base = os.path.dirname(htmlpath) for img in soup('img', src=True): src = img['src'] ipath = os.path.join(base, *src.split('/')) if os.path.exists(ipath): continue src = src.split(';')[0] if not src: continue ipath = os.path.join(base, *src.split('/')) if not os.path.exists(ipath): while src.startswith('../'): src = src[3:] img['src'] = src
|
def _reformat(self, data): try: data = xml_to_unicode(data, strip_encoding_pats=True)[0] soup = BeautifulSoup(data) except ValueError: # hit some strange encoding problems... self.log.exception("Unable to parse html for cleaning, leaving it") return data # nuke javascript... [s.extract() for s in soup('script')] # remove forward and back nav bars from the top/bottom of each page # cos they really fuck with the flow of things and generally waste space # since we can't use [a,b] syntax to select arbitrary items from a list # we'll have to do this manually... # only remove the tables, if they have an image with an alt attribute # containing prev, next or team t = soup('table') if t: if (t[0].previousSibling is None or t[0].previousSibling.previousSibling is None): try: alt = t[0].img['alt'].lower() if alt.find('prev') != -1 or alt.find('next') != -1 or alt.find('team') != -1: t[0].extract() except: pass if (t[-1].nextSibling is None or t[-1].nextSibling.nextSibling is None): try: alt = t[-1].img['alt'].lower() if alt.find('prev') != -1 or alt.find('next') != -1 or alt.find('team') != -1: t[-1].extract() except: pass # for some very odd reason each page's content appears to be in a table # too. and this table has sub-tables for random asides... grr.
|
self.workaround_sony_quirks()
|
def convert(self, oeb, output_path, input_plugin, opts, log): self.log, self.opts, self.oeb = log, opts, oeb
|
|
from calibre import browser
|
def fetch(self): if not self.isbn: return from calibre import browser from calibre.ebooks.metadata import MetaInformation import json br = browser() try: raw = br.open( 'http://status.calibre-ebook.com/library_thing/metadata/'+self.isbn ).read() data = json.loads(raw) if not data: return if 'error' in data: raise Exception(data['error']) if 'series' in data and 'series_index' in data: mi = MetaInformation(self.title, []) mi.series = data['series'] mi.series_index = data['series_index'] self.results = mi except Exception, e: self.exception = e self.tb = traceback.format_exc()
|
|
results = sorted(results, cmp=lambda x, y : cmp( (x.comments.strip() if x.comments else ''), (y.comments.strip() if y.comments else '') ), reverse=True)
|
results = list(filter(filter_metadata_results, results)) check_for_covers(results) words = ("the", "a", "an", "of", "and") prefix_pat = re.compile(r'^(%s)\s+'%("|".join(words))) trailing_paren_pat = re.compile(r'\(.*\)$') whitespace_pat = re.compile(r'\s+') def sort_func(x, y): def cleanup_title(s): s = s.strip().lower() s = prefix_pat.sub(' ', s) s = trailing_paren_pat.sub('', s) s = whitespace_pat.sub(' ', s) return s.strip() t = cleanup_title(title) x_title = cleanup_title(x.title) y_title = cleanup_title(y.title) tx = cmp(t, x_title) ty = cmp(t, y_title) result = 0 if abs(tx) == abs(ty) else abs(tx) - abs(ty) if result == 0: result = -cmp(x.has_cover, y.has_cover) if result == 0: cx = len(x.comments.strip() if x.comments else '') cy = len(y.comments.strip() if y.comments else '') t = (cx + cy) / 20 result = cy - cx if abs(result) < t: result = 0 return result results = sorted(results, cmp=sort_func) if len(results) > 1: if not results[0].comments or len(results[0].comments) == 0: for r in results[1:]: if title.lower() == r.title[:len(title)].lower() and r.comments and len(r.comments): results[0].comments = r.comments break
|
def search(title=None, author=None, publisher=None, isbn=None, isbndb_key=None, verbose=0): assert not(title is None and author is None and publisher is None and \ isbn is None) from calibre.customize.ui import metadata_sources, migrate_isbndb_key migrate_isbndb_key() if isbn is not None: isbn = re.sub(r'[^a-zA-Z0-9]', '', isbn).upper() fetchers = list(metadata_sources(isbndb_key=isbndb_key)) with MetadataSources(fetchers) as manager: manager(title, author, publisher, isbn, verbose) manager.join() results = list(fetchers[0].results) for fetcher in fetchers[1:]: merge_results(results, fetcher.results) results = sorted(results, cmp=lambda x, y : cmp( (x.comments.strip() if x.comments else ''), (y.comments.strip() if y.comments else '') ), reverse=True) return results, [(x.name, x.exception, x.tb) for x in fetchers]
|
pml = re.sub(r'\\.\d=""', '', pml) pml = re.sub(r'\\.=""', '', pml) pml = re.sub(r'\\.\d', '', pml) pml = re.sub(r'\\.', '', pml)
|
pml = re.sub(r'\\C\d=".+*"', '', pml) pml = re.sub(r'\\Fn=".+*"', '', pml) pml = re.sub(r'\\Sd=".+*"', '', pml) pml = re.sub(r'\\.=".+*"', '', pml) pml = re.sub(r'\\X\d', '', pml) pml = re.sub(r'\\S[pbd]', '', pml) pml = re.sub(r'\\Fn', '', pml)
|
def strip_pml(self, pml): pml = re.sub(r'\\.\d=""', '', pml) pml = re.sub(r'\\.=""', '', pml) pml = re.sub(r'\\.\d', '', pml) pml = re.sub(r'\\.', '', pml) pml = re.sub(r'\\a\d\d\d', '', pml) pml = re.sub(r'\\U\d\d\d\d', '', pml) pml.replace('\r\n', ' ') pml.replace('\n', ' ') pml.replace('\r', ' ')
|
return self.static('index.html')
|
return self.static('index.html').replace('{prefix}', self.opts.url_prefix)
|
def old(self, **kwargs): return self.static('index.html')
|
if ' href += '
|
if href.startswith('#'): href = href[1:]
|
def dump_text(self, elem, stylizer, page, tag_stack=[]): if not isinstance(elem.tag, basestring) \ or namespace(elem.tag) != XHTML_NS: return []
|
rows.append(u'<tr><td>%s</td><td>%s</td></tr>'%
|
rows.append(u'<tr><td>%s</td><td> </td><td>%s</td></tr>'%
|
def initialize(self, name, default, help): variables = sorted(FORMAT_ARG_DESCS.keys()) rows = [] for var in variables: rows.append(u'<tr><td>%s</td><td>%s</td></tr>'% (var, FORMAT_ARG_DESCS[var])) table = u'<table>%s</table>'%(u'\n'.join(rows)) self.template_variables.setText(table)
|
tmpl = preprocess_template(self.opt_template.text()) fa = {} try: safe_format(tmpl, fa) except Exception, err: error_dialog(self, _('Invalid template'), '<p>'+_('The template %s is invalid:')%tmpl + \ '<br>'+str(err), show=True) return False
|
def validate(self): # TODO: NEWMETA: I haven't figured out how to get the custom columns # into here, so for the moment make all templates valid. return True
|
|
if l == 3: return dt.strftime('%a') return dt.strftime('%A')
|
if l == 3: return strf('%a') return strf('%A')
|
def format_day(mo): l = len(mo.group(0)) if l == 1: return '%d'%dt.day if l == 2: return '%02d'%dt.day if l == 3: return dt.strftime('%a') return dt.strftime('%A')
|
if l == 3: return dt.strftime('%b') return dt.strftime('%B')
|
if l == 3: return strf('%b') return strf('%B')
|
def format_month(mo): l = len(mo.group(0)) if l == 1: return '%d'%dt.month if l == 2: return '%02d'%dt.month if l == 3: return dt.strftime('%b') return dt.strftime('%B')
|
error_dialog(None, _('Repairing failed'),
|
error_dialog(self.splash_screen, _('Repairing failed'),
|
def initialize_db_stage2(self, db, tb): repair_pd = getattr(self, 'repair_pd', None) if repair_pd is not None: repair_pd.cancel()
|
candidate = choose_dir(None, 'choose calibre library',
|
candidate = choose_dir(self.splash_screen, 'choose calibre library',
|
def initialize_db_stage2(self, db, tb): repair_pd = getattr(self, 'repair_pd', None) if repair_pd is not None: repair_pd.cancel()
|
error_dialog(None, _('Bad database location'),
|
error_dialog(self.splash_screen, _('Bad database location'),
|
def initialize_db_stage2(self, db, tb): repair_pd = getattr(self, 'repair_pd', None) if repair_pd is not None: repair_pd.cancel()
|
repair = question_dialog(None, _('Corrupted database'),
|
repair = question_dialog(self.splash_screen, _('Corrupted database'),
|
def initialize_db(self): db = None try: db = LibraryDatabase2(self.library_path) except (sqlite.Error, DatabaseException): repair = question_dialog(None, _('Corrupted database'), _('Your calibre database appears to be corrupted. Do ' 'you want calibre to try and repair it automatically? ' 'If you say No, a new empty calibre library will be created.'), det_msg=traceback.format_exc() ) if repair: self.repair_pd = QProgressDialog(_('Repairing database. This ' 'can take a very long time for a large collection'), QString(), 0, 0) self.repair_pd.setWindowModality(Qt.WindowModal) self.repair_pd.show()
|
error_dialog(None, _('Bad database location'),
|
error_dialog(self.splash_screen, _('Bad database location'),
|
def initialize_db(self): db = None try: db = LibraryDatabase2(self.library_path) except (sqlite.Error, DatabaseException): repair = question_dialog(None, _('Corrupted database'), _('Your calibre database appears to be corrupted. Do ' 'you want calibre to try and repair it automatically? ' 'If you say No, a new empty calibre library will be created.'), det_msg=traceback.format_exc() ) if repair: self.repair_pd = QProgressDialog(_('Repairing database. This ' 'can take a very long time for a large collection'), QString(), 0, 0) self.repair_pd.setWindowModality(Qt.WindowModal) self.repair_pd.show()
|
self.sorted_on[1], reset=reset)
|
self.sorted_on[1], reset=False) if reset: self.reset()
|
def resort(self, reset=True): if self.sorted_on: self.sort(self.column_map.index(self.sorted_on[0]), self.sorted_on[1], reset=reset)
|
def icu_collator(s1, s2, func=None): return cmp(func(unicode(s1)), func(unicode(s2)))
|
def icu_collator(s1, s2): return strcmp(force_unicode(s1, 'utf-8'), force_unicode(s2, 'utf-8'))
|
def icu_collator(s1, s2, func=None): return cmp(func(unicode(s1)), func(unicode(s2)))
|
from calibre.utils.icu import sort_key self.conn.create_collation('icucollate', partial(icu_collator, func=sort_key))
|
self.conn.create_collation('icucollate', icu_collator)
|
def connect(self): self.conn = sqlite.connect(self.path, factory=Connection, detect_types=sqlite.PARSE_DECLTYPES|sqlite.PARSE_COLNAMES) self.conn.execute('pragma cache_size=5000') encoding = self.conn.execute('pragma encoding').fetchone()[0] c_ext_loaded = load_c_extensions(self.conn) self.conn.row_factory = sqlite.Row if self.row_factory else lambda cursor, row : list(row) self.conn.create_aggregate('concat', 1, Concatenate) if not c_ext_loaded: self.conn.create_aggregate('sortconcat', 2, SortedConcatenate) self.conn.create_aggregate('sort_concat', 2, SafeSortedConcatenate) self.conn.create_collation('PYNOCASE', partial(pynocase, encoding=encoding)) if tweaks['title_series_sorting'] == 'strictly_alphabetic': self.conn.create_function('title_sort', 1, lambda x:x) else: self.conn.create_function('title_sort', 1, title_sort) self.conn.create_function('author_to_author_sort', 1, _author_to_author_sort) self.conn.create_function('uuid4', 0, lambda : str(uuid.uuid4())) # Dummy functions for dynamically created filters self.conn.create_function('books_list_filter', 1, lambda x: 1) from calibre.utils.icu import sort_key self.conn.create_collation('icucollate', partial(icu_collator, func=sort_key))
|
elif category == 'newest': ids = self.search_cache('')
|
all_ids = self.search_cache('') if category == 'newest': ids = all_ids
|
def browse_matches(self, category=None, cid=None, list_sort=None): if list_sort: list_sort = unquote(list_sort) if not cid: raise cherrypy.HTTPError(404, 'invalid category id: %r'%cid) categories = self.categories_cache()
|
ids = self.search_cache('')
|
ids = all_ids
|
def browse_matches(self, category=None, cid=None, list_sort=None): if list_sort: list_sort = unquote(list_sort) if not cid: raise cherrypy.HTTPError(404, 'invalid category id: %r'%cid) categories = self.categories_cache()
|
def get_sort_key(x): sk = x.s if isinstance(sk, unicode): sk = sort_key(sk) return sk kf = get_sort_key
|
kf = lambda x :sort_key(x.s)
|
def get_sort_key(x): sk = x.s if isinstance(sk, unicode): sk = sort_key(sk) return sk
|
self.logger.debug('Skipping article %s (%s) from feed %s as it is too old.'%(title, article.localtime.strftime('%a, %d %b, %Y %H:%M'), self.title))
|
t = strftime(u'%a, %d %b, %Y %H:%M', article.localtime.timetuple()) self.logger.debug('Skipping article %s (%s) from feed %s as it is too old.'% (title, t, self.title))
|
def populate_from_preparsed_feed(self, title, articles, oldest_article=7, max_articles_per_feed=100): self.title = unicode(title if title else _('Unknown feed')) self.description = '' self.image_url = None self.articles = [] self.added_articles = []
|
if DEBUG: prints('Save-to-disk using plugboard:', fmt, cpb)
|
prints('Save-to-disk using plugboard:', fmt, cpb)
|
def save_book_to_disk(id, db, root, opts, length): mi = db.get_metadata(id, index_is_id=True) available_formats = db.formats(id, index_is_id=True) if not available_formats: available_formats = [] else: available_formats = [x.lower().strip() for x in available_formats.split(',')] if opts.formats == 'all': asked_formats = available_formats else: asked_formats = [x.lower().strip() for x in opts.formats.split(',')] formats = set(available_formats).intersection(set(asked_formats)) if not formats: return True, id, mi.title components = get_components(opts.template, mi, id, opts.timefmt, length, ascii_filename if opts.asciiize else sanitize_file_name, to_lowercase=opts.to_lowercase, replace_whitespace=opts.replace_whitespace) base_path = os.path.join(root, *components) base_name = os.path.basename(base_path) dirpath = os.path.dirname(base_path) # Don't test for existence first are the test could fail but # another worker process could create the directory before # the call to makedirs try: os.makedirs(dirpath) except BaseException: if not os.path.exists(dirpath): raise cdata = db.cover(id, index_is_id=True) if opts.save_cover: if cdata is not None: with open(base_path+'.jpg', 'wb') as f: f.write(cdata) mi.cover = base_name+'.jpg' else: mi.cover = None if opts.write_opf: opf = metadata_to_opf(mi) with open(base_path+'.opf', 'wb') as f: f.write(opf) if cdata is not None: mi.cover_data = ('jpg', cdata) mi.cover = None written = False for fmt in formats: global plugboard_save_to_disk_value, plugboard_any_format_value dev_name = plugboard_save_to_disk_value plugboards = db.prefs.get('plugboards', {}) cpb = None if fmt in plugboards: cpb = plugboards[fmt] if dev_name in cpb: cpb = cpb[dev_name] else: cpb = None if cpb is None and plugboard_any_format_value in plugboards: cpb = plugboards[plugboard_any_format_value] if dev_name in cpb: cpb = cpb[dev_name] else: cpb = None if DEBUG: prints('Save-to-disk using plugboard:', fmt, cpb) data = db.format(id, fmt, index_is_id=True) if data is None: continue else: written = True if opts.update_metadata: stream = cStringIO.StringIO() stream.write(data) stream.seek(0) try: if cpb: newmi = mi.deepcopy_metadata() newmi.template_to_attribute(mi, cpb) else: newmi = mi set_metadata(stream, newmi, fmt) except: if DEBUG: traceback.print_exc() stream.seek(0) data = stream.read() fmt_path = base_path+'.'+str(fmt) with open(fmt_path, 'wb') as f: f.write(data) return not written, id, mi.title
|
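The directory creation in save_book_to_disk deliberately avoids the check-then-create pattern, because another worker process may create the directory between the existence check and os.makedirs(). A small sketch of that idiom on its own (the path below is hypothetical, not from the source):

import os

def ensure_dir(dirpath):
    # create blindly and only re-raise if the directory still does not
    # exist afterwards; a concurrent worker creating it is not an error
    try:
        os.makedirs(dirpath)
    except BaseException:
        if not os.path.exists(dirpath):
            raise

ensure_dir(os.path.join('tmp', 'calibre-save-to-disk-example'))   # hypothetical path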
self.title = title if title else _('Unknown feed')
|
self.title = unicode(title if title else _('Unknown feed'))
|
def populate_from_preparsed_feed(self, title, articles, oldest_article=7,
                                 max_articles_per_feed=100):
    self.title = title if title else _('Unknown feed')
    self.description = ''
    self.image_url = None
    self.articles = []
    self.added_articles = []
|
self._model.sorting_done.connect(self.sorting_done)
|
self._model.sorting_done.connect(self.sorting_done, type=Qt.QueuedConnection)
|
def __init__(self, parent, modelcls=BooksModel):
    QTableView.__init__(self, parent)
|
self.output_dir = os.getcwd()
|
self.output_dir = os.path.abspath(os.getcwdu())
|
def __init__(self, options, log, progress_reporter):
    '''
    Initialize the recipe.
    :param options: Parsed commandline options
    :param parser: Command line option parser. Used to intelligently merge options.
    :param progress_reporter: A Callable that takes two arguments: progress (a number
                              between 0 and 1) and a string message. The message should
                              be optional.
    '''
    self.log = log
    if not isinstance(self.title, unicode):
        self.title = unicode(self.title, 'utf-8', 'replace')
|
self.output_dir = os.path.abspath(self.output_dir)
|
def __init__(self, options, log, progress_reporter):
    '''
    Initialize the recipe.
    :param options: Parsed commandline options
    :param parser: Command line option parser. Used to intelligently merge options.
    :param progress_reporter: A Callable that takes two arguments: progress (a number
                              between 0 and 1) and a string message. The message should
                              be optional.
    '''
    self.log = log
    if not isinstance(self.title, unicode):
        self.title = unicode(self.title, 'utf-8', 'replace')
|
|
if ImageID == None:
|
if ImageID != None:
|
def delete_images(self, ImageID):
    if ImageID == None:
        path_prefix = '.kobo/images/'
        path = self._main_prefix + path_prefix + ImageID
|
if book['author_sort'][0] != current_letter :
|
if book['author_sort'][0].upper() != current_letter :
|
def generateHTMLByAuthor(self):
    # Write books by author A-Z
    if self.verbose:
        print self.updateProgressFullStep("generateHTMLByAuthor()")
    friendly_name = "By Author"
|
opts.creator = "calibre"
|
opts.creator = "Calibre"
|
def run(self, path_to_output, opts, db, notification=DummyReporter()):
    from calibre.utils.logging import Log
|
self.db_book_uuid_cache = set()
|
def __init__(self):
    self.db_book_uuid_cache = set()
    self.device_error_dialog = error_dialog(self, _('Error'),
            _('Error communicating with device'), ' ')
    self.device_error_dialog.setModal(Qt.NonModal)
    self.device_connected = None
    self.emailer = Emailer()
    self.emailer.start()
    self.device_manager = DeviceManager(Dispatcher(self.device_detected),
            self.job_manager, Dispatcher(self.status_bar.show_message))
    self.device_manager.start()
|
|
if reset:
|
if reset or not hasattr(self, 'db_book_title_cache'):
    if not hasattr(self, 'library_view') or self.library_view is None:
        return
|
def set_books_in_library(self, booklists, reset=False):
    if reset:
        # First build a cache of the library, so the search isn't O(n**2)
        self.db_book_title_cache = {}
        self.db_book_uuid_cache = {}
        db = self.library_view.model().db
        for id in db.data.iterallids():
            mi = db.get_metadata(id, index_is_id=True)
            title = re.sub('(?u)\W|[_]', '', mi.title.lower())
            if title not in self.db_book_title_cache:
                self.db_book_title_cache[title] = \
                        {'authors':{}, 'author_sort':{}, 'db_ids':{}}
            if mi.authors:
                authors = authors_to_string(mi.authors).lower()
                authors = re.sub('(?u)\W|[_]', '', authors)
                self.db_book_title_cache[title]['authors'][authors] = mi
            if mi.author_sort:
                aus = mi.author_sort.lower()
                aus = re.sub('(?u)\W|[_]', '', aus)
                self.db_book_title_cache[title]['author_sort'][aus] = mi
            self.db_book_title_cache[title]['db_ids'][mi.application_id] = mi
            self.db_book_uuid_cache[mi.uuid] = mi.application_id
|
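The cache built in set_books_in_library keys books by a normalised title and author so that device/library matching avoids an O(n**2) scan. A rough standalone sketch of that normalisation, with made-up sample data standing in for the calibre database:

import re

def normalise(s):
    # lower-case and strip non-word characters, as in the cache above
    return re.sub('(?u)\W|[_]', '', s.lower())

# hypothetical sample data
library = [{'title': 'A Tale of Two Cities!', 'authors': 'Charles Dickens'}]
cache = {}
for book in library:
    key = normalise(book['title'])
    cache.setdefault(key, {'authors': {}})
    cache[key]['authors'][normalise(book['authors'])] = book

print normalise('a tale of two cities') in cache   # -> True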
q = Queue.Queue()
self.dispatch_signal.emit(q, args, kwargs)
return q.get()
|
with self.lock:
    self.dispatch_signal.emit(self.q, args, kwargs)
    res = self.q.get()
return res
|
def __call__(self, *args, **kwargs):
    q = Queue.Queue()
    self.dispatch_signal.emit(q, args, kwargs)
    return q.get()
|
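The new __call__ hands a fresh Queue to the receiving thread and blocks on q.get() for the result; a private queue per call means concurrent callers cannot pick up each other's results, so the shared queue and lock of the old code become unnecessary. A minimal sketch of that hand-off pattern using plain threads instead of Qt signals (this is not calibre's actual Dispatcher):

import threading, Queue

def dispatch(q, func, args, kwargs):
    # in calibre this runs in the GUI thread via a queued Qt signal;
    # here a plain worker thread stands in for it
    q.put(func(*args, **kwargs))

def call_in_other_thread(func, *args, **kwargs):
    q = Queue.Queue()               # fresh queue per call, no shared lock
    t = threading.Thread(target=dispatch, args=(q, func, args, kwargs))
    t.start()
    return q.get()                  # block until the result is put back

print call_in_other_thread(lambda a, b: a + b, 2, 3)   # -> 5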
sbase = max(sizes.items(), key=operator.itemgetter(1))[0]
|
try:
    sbase = max(sizes.items(), key=operator.itemgetter(1))[0]
except:
    sbase = 12.0
|
def baseline_spine(self):
    sizes = defaultdict(float)
    for item in self.oeb.spine:
        html = item.data
        stylizer = self.stylizers[item]
        body = html.find(XHTML('body'))
        fsize = self.context.source.fbase
        self.baseline_node(body, stylizer, sizes, fsize)
    sbase = max(sizes.items(), key=operator.itemgetter(1))[0]
    self.oeb.logger.info(
        "Source base font size is %0.05fpt" % sbase)
    return sbase
|
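baseline_spine picks the font size that accounts for the most text: the dict key with the largest accumulated value, falling back to 12pt when nothing was collected and max() raises. The same idiom in isolation, with made-up weights:

import operator
from collections import defaultdict

sizes = defaultdict(float)
sizes[10.0] += 3.5    # hypothetical accumulated weight per font size
sizes[12.0] += 7.25
try:
    sbase = max(sizes.items(), key=operator.itemgetter(1))[0]
except:
    sbase = 12.0      # fallback when no sizes were collected
print sbase           # -> 12.0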
cats = [(u'<li><a title="{2} {0}" href="/browse/category/{1}"> </a>'
|
cats = [(u'<li><a title="{2} {0}" href="{3}/browse/category/{1}"> </a>'
|
def getter(x):
    return category_meta[x]['name'].lower()
|
level = 1
|
def s_text_list(self, tag, attrs):
    """ To know which level we're at, we have to count the number of
        <text:list> elements on the tagstack.
    """
    name = attrs.get( (TEXTNS,'style-name') )
    if name:
        name = name.replace(".","_")
        level = 1
    else:
        # FIXME: If a list is contained in a table cell or text box,
        # the list level must return to 1, even though the table or
        # textbox itself may be nested within another list.
        level = self.tagstack.count_tags(tag) + 1
        name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
    if self.generate_css:
        self.opentag('%s' % self.listtypes.get(name),
                     {'class':"%s_%d" % (name, level) })
    else:
        self.opentag('%s' % self.listtypes.get(name))
    self.purgedata()
|
|
level = self.tagstack.count_tags(tag) + 1
|
def s_text_list(self, tag, attrs):
    """ To know which level we're at, we have to count the number of
        <text:list> elements on the tagstack.
    """
    name = attrs.get( (TEXTNS,'style-name') )
    if name:
        name = name.replace(".","_")
        level = 1
    else:
        # FIXME: If a list is contained in a table cell or text box,
        # the list level must return to 1, even though the table or
        # textbox itself may be nested within another list.
        level = self.tagstack.count_tags(tag) + 1
        name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
    if self.generate_css:
        self.opentag('%s' % self.listtypes.get(name),
                     {'class':"%s_%d" % (name, level) })
    else:
        self.opentag('%s' % self.listtypes.get(name))
    self.purgedata()
|
|
self.opentag('%s' % self.listtypes.get(name), {'class':"%s_%d" % (name, level) })
|
self.opentag('%s' % self.listtypes.get(list_class,'UL'), {'class': list_class })
|
def s_text_list(self, tag, attrs):
    """ To know which level we're at, we have to count the number of
        <text:list> elements on the tagstack.
    """
    name = attrs.get( (TEXTNS,'style-name') )
    if name:
        name = name.replace(".","_")
        level = 1
    else:
        # FIXME: If a list is contained in a table cell or text box,
        # the list level must return to 1, even though the table or
        # textbox itself may be nested within another list.
        level = self.tagstack.count_tags(tag) + 1
        name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
    if self.generate_css:
        self.opentag('%s' % self.listtypes.get(name),
                     {'class':"%s_%d" % (name, level) })
    else:
        self.opentag('%s' % self.listtypes.get(name))
    self.purgedata()
|
self.opentag('%s' % self.listtypes.get(name))
|
self.opentag('%s' % self.listtypes.get(list_class,'UL'))
|
def s_text_list(self, tag, attrs):
    """ To know which level we're at, we have to count the number of
        <text:list> elements on the tagstack.
    """
    name = attrs.get( (TEXTNS,'style-name') )
    if name:
        name = name.replace(".","_")
        level = 1
    else:
        # FIXME: If a list is contained in a table cell or text box,
        # the list level must return to 1, even though the table or
        # textbox itself may be nested within another list.
        level = self.tagstack.count_tags(tag) + 1
        name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
    if self.generate_css:
        self.opentag('%s' % self.listtypes.get(name),
                     {'class':"%s_%d" % (name, level) })
    else:
        self.opentag('%s' % self.listtypes.get(name))
    self.purgedata()
|
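s_text_list derives the nesting level of an unnamed list by counting how many <text:list> tags are already open on the tag stack and adding one. A toy illustration of that counting, with a plain list standing in for odfpy's tagstack object:

TEXTNS = 'urn:oasis:names:tc:opendocument:xmlns:text:1.0'

# a plain list standing in for odfpy's tagstack
tagstack = [(TEXTNS, 'list'), (TEXTNS, 'list-item'), (TEXTNS, 'list')]

def count_tags(stack, tag):
    return len([t for t in stack if t == tag])

level = count_tags(tagstack, (TEXTNS, 'list')) + 1
print level   # -> 3: two enclosing <text:list> elements plus this one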
level = 1
|
def e_text_list(self, tag, attrs):
    self.writedata()
    name = attrs.get( (TEXTNS,'style-name') )
    if name:
        name = name.replace(".","_")
        level = 1
    else:
        # FIXME: If a list is contained in a table cell or text box,
        # the list level must return to 1, even though the table or
        # textbox itself may be nested within another list.
        level = self.tagstack.count_tags(tag) + 1
        name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
    self.closetag(self.listtypes.get(name))
    self.purgedata()
|
|
level = self.tagstack.count_tags(tag) + 1
|
def e_text_list(self, tag, attrs):
    self.writedata()
    name = attrs.get( (TEXTNS,'style-name') )
    if name:
        name = name.replace(".","_")
        level = 1
    else:
        # FIXME: If a list is contained in a table cell or text box,
        # the list level must return to 1, even though the table or
        # textbox itself may be nested within another list.
        level = self.tagstack.count_tags(tag) + 1
        name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
    self.closetag(self.listtypes.get(name))
    self.purgedata()
|
|
self.closetag(self.listtypes.get(name))
|
list_class = "%s_%d" % (name, level)
self.closetag(self.listtypes.get(list_class,'UL'))
|
def e_text_list(self, tag, attrs):
    self.writedata()
    name = attrs.get( (TEXTNS,'style-name') )
    if name:
        name = name.replace(".","_")
        level = 1
    else:
        # FIXME: If a list is contained in a table cell or text box,
        # the list level must return to 1, even though the table or
        # textbox itself may be nested within another list.
        level = self.tagstack.count_tags(tag) + 1
        name = self.tagstack.rfindattr( (TEXTNS,'style-name') )
    self.closetag(self.listtypes.get(name))
    self.purgedata()
|
self.listtypes[name] = 'ul'
|
def s_text_list_level_style_bullet(self, tag, attrs):
    """ CSS doesn't have the ability to set the glyph to a particular
        character, so we just go through the available glyphs
    """
    name = self.tagstack.rfindattr( (STYLENS,'name') )
    self.listtypes[name] = 'ul'
    level = attrs[(TEXTNS,'level')]
    self.prevstyle = self.currentstyle
    self.currentstyle = ".%s_%s" % ( name.replace(".","_"), level)
    self.stylestack.append(self.currentstyle)
    self.styledict[self.currentstyle] = {}
|