Columns: rem = removed code (0 to 322k chars), add = added code (0 to 2.05M chars), context = surrounding code (8 to 228k chars)
export_dir = choose_dir(self, 'Export Catalog Directory', 'Select destination for %s.%s' % (job.catalog_title, job.fmt.lower()))
export_dir = choose_dir(self, _('Export Catalog Directory'), _('Select destination for %s.%s') % (job.catalog_title, job.fmt.lower()))
def catalog_generated(self, job):
    if job.failed:
        return self.job_exception(job)
    id = self.library_view.model().add_catalog(job.catalog_file_path, job.catalog_title)
    self.library_view.model().reset()
    if job.catalog_sync:
        sync = dynamic.get('catalogs_to_be_synced', set([]))
        sync.add(id)
        dynamic.set('catalogs_to_be_synced', sync)
    self.status_bar.showMessage(_('Catalog generated.'), 3000)
    self.sync_catalogs()
self.tags.setText(', '.join(tags.split(',')) if tags else '')
self.original_tags = ', '.join(tags.split(',')) if tags else ''
self.tags.setText(self.original_tags)
def __init__(self, window, row, db, accepted_callback=None, cancel_all=False):
    ResizableDialog.__init__(self, window)
    self.bc_box.layout().setAlignment(self.cover, Qt.AlignCenter|Qt.AlignHCenter)
    self.cancel_all = False
    if cancel_all:
        self.__abort_button = self.button_box.addButton(self.button_box.Abort)
        self.__abort_button.setToolTip(_('Abort the editing of all remaining books'))
        self.connect(self.__abort_button, SIGNAL('clicked()'), self.do_cancel_all)
    self.splitter.setStretchFactor(100, 1)
    self.db = db
    self.pi = ProgressIndicator(self)
    self.accepted_callback = accepted_callback
    self.id = db.id(row)
    self.row = row
    self.cover_data = None
    self.formats_changed = False
    self.formats.setAcceptDrops(True)
    self.cover_changed = False
    self.cpixmap = None
    self.cover.setAcceptDrops(True)
    self.pubdate.setMinimumDate(QDate(100,1,1))
    pubdate_format = tweaks['gui_pubdate_display_format']
    if pubdate_format is not None:
        self.pubdate.setDisplayFormat(pubdate_format)
    self.date.setMinimumDate(QDate(100,1,1))
partial(self.edit_metadata, False))
partial(self.edit_metadata, False, bulk=False))
def __init__(self):
    md = QMenu()
    md.addAction(_('Edit metadata individually'), partial(self.edit_metadata, False))
    md.addSeparator()
    md.addAction(_('Edit metadata in bulk'), partial(self.edit_metadata, False, bulk=True))
    md.addSeparator()
    md.addAction(_('Download metadata and covers'), partial(self.download_metadata, False, covers=True))
    md.addAction(_('Download only metadata'), partial(self.download_metadata, False, covers=False))
    md.addAction(_('Download only covers'), partial(self.download_metadata, False, covers=True, set_metadata=False, set_social_metadata=False))
    md.addAction(_('Download only social metadata'), partial(self.download_metadata, False, covers=False, set_metadata=False, set_social_metadata=True))
    self.metadata_menu = md
if not line.find(CALIBRE_SNB_PRE_TAG) == 0:
pos = line.find(CALIBRE_SNB_PRE_TAG)
if pos == -1:
def mlize(self):
    output = [ u'' ]
    stylizer = Stylizer(self.item.data, self.item.href, self.oeb_book, self.opts,
        self.opts.output_profile)
    content = unicode(etree.tostring(self.item.data.find(XHTML('body')), encoding=unicode))
etree.CDATA(line[len(CALIBRE_SNB_PRE_TAG):])
etree.CDATA(line[pos+len(CALIBRE_SNB_PRE_TAG):])
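The fix above hinges on a str.find() pitfall worth a stand-alone illustration: `not line.find(tag) == 0` only tests for a match at position 0, while -1 is the real "not found" sentinel, and keeping `pos` lets the slice start at the actual match. A minimal sketch (the tag value here is a placeholder, not calibre's real constant):

CALIBRE_SNB_PRE_TAG = '<calibre-snb-pre>'  # placeholder value, for illustration only

def extract_pre_payload(line):
    pos = line.find(CALIBRE_SNB_PRE_TAG)
    if pos == -1:
        return None  # tag absent anywhere in the line
    return line[pos + len(CALIBRE_SNB_PRE_TAG):]

assert extract_pre_payload('x' + CALIBRE_SNB_PRE_TAG + 'body') == 'body'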
run_level = 1,
def __init__(self,
        in_file,
        bug_handler,
        copy = None,
        #run_level = 1,
def __from_ms_to_utf8(self, match_obj):
    uni_char = int(match_obj.group(1))
    if uni_char < 0:
        uni_char += 65536
    return '&#x' + str('%X' % uni_char) + ';'
self.__uc_value.pop()
def __unicode_process(self, token):
    #change scope in
    if token == '\{':
        self.__uc_value.append(self.__uc_value[-1])
        #basic error handling
        self.__reini_utf8_counters()
        return token
    #change scope out: evaluate dict and rebuild
    elif token == '\}':
        #self.__uc_value.pop()
        self.__reini_utf8_counters()
        return token
    #add a uc control
    elif token[:3] == '\uc':
        self.__uc_value[-1] = int(token[3:])
        self.__reini_utf8_counters()
        return token
    #handle uc skippable char
    elif self.__uc_char:
        #if token[:1] == "\" and token[:1] == "\"
        pass
    #go for real \u token
    match_obj = self.__utf_exp.match(token)
    if match_obj is not None:
        #get value and handle negative case
        uni_char = int(match_obj.group(1))
        uni_len = len(match_obj.group(1)) + 2
        if uni_char < 0:
            uni_char += 65536
        uni_char = unichr(uni_char).encode('ascii', 'xmlcharrefreplace')
        #if not uc0
        if self.__uc_value[-1]:
            self.__uc_char = self.__uc_value[-1]
        #there is only an unicode char
        if len(token) <= uni_len:
            return uni_char
        #an unicode char and something else
        #must be after as it is splited on \
        elif not self.__uc_value[-1]:
            print('not only token uc0 token: ' + uni_char + token[uni_len:])
            return uni_char + token[uni_len:]
        #if not uc0 and chars
        else:
            for i in xrange(uni_len, len(token)):
                if token[i] == " ":
                    continue
                elif self.__uc_char > 0:
                    self.__uc_char -= 1
                else:
                    return uni_char + token[i:]
    #print('uc: ' + str(self.__uc_value) + 'uni: ' + str(uni_char) + 'token: ' + token)
    #default
    return token
elif self.__uc_bin:
    self.__uc_bin = False
    return ''
pass
if token[:4] == '\bin':
    self.__uc_char -=1
    self.__uc_bin = True
    return ''
elif token[:1] == "\\" :
    self.__uc_char -=1
    return ''
else:
    return self.__remove_uc_chars(0, token)
if self.__uc_value[-1]:
    self.__uc_char = self.__uc_value[-1]
self.__uc_char = self.__uc_value[-1]
elif not self.__uc_value[-1]:
    print('not only token uc0 token: ' + uni_char + token[uni_len:])
elif not self.__uc_char:
for i in xrange(uni_len, len(token)):
    if token[i] == " ":
        continue
    elif self.__uc_char > 0:
        self.__uc_char -= 1
    else:
        return uni_char + token[i:]
return uni_char + self.__remove_uc_chars(uni_len, token)
input_file = re.sub(self.__ms_hex_exp, "\\mshex0\g<1> ", input_file)
tokens = re.split(self.__splitexp, input_file)
return filter(lambda x: len(x) > 0 and x != '\n', tokens)
def __sub_reg_split(self, input_file):
    input_file = self.__replace_spchar.mreplace(input_file)
    #input_file = re.sub(self.__utf_exp, self.__from_ms_to_utf8, input_file)
    # line = re.sub( self.__neg_utf_exp, self.__neg_unicode_func, line)
    # this is for older RTF
    #line = re.sub(self.__par_exp, '\\par ', line)
    input_file = re.sub(self.__ms_hex_exp, "\\mshex0\g<1> ", input_file)
    #split
    tokens = re.split(self.__splitexp, input_file)
    #remove empty tokens and \n
    return filter(lambda x: len(x) > 0 and x != '\n', tokens)
    #return filter(lambda x: len(x) > 0, \
    #    (self.__remove_line.sub('', x) for x in tokens))
self.__utf_exp = re.compile(r"\\u(-?\d{3,6}) {0,1}")
self.__utf_exp = re.compile(r"\\u(-?\d{3,6}) ?")
self.__bin_exp = re.compile(r"(?:\\bin(-?\d{0,10})[\n ]+)[01\n]+")
self.__utf_ud = re.compile(r"\\{[\n ]?\\upr[\n ]?(?:\\{.*?\\})[\n ]?" + \
    r"\\{[\n ]?\\*[\n ]?\\ud[\n ]?(\\{.*?\\})[\n ]?\\}[\n ]?\\}")
self.__splitexp = re.compile(r"(\\[{}]|\n|\\[^\s\\{}&]+(?:[ \t\r\f\v])?)")
def __compile_expressions(self):
    SIMPLE_RPL = {
        "\\\\": "\\backslash ",
        "\\~": "\\~ ",
        "\\;": "\\; ",
        "&": "&amp;",
        "<": "&lt;",
        ">": "&gt;",
        "\\_": "\\_ ",
        "\\:": "\\: ",
        "\\-": "\\- ",
        # turn into a generic token to eliminate special
        # cases and make processing easier
        "\\{": "\\ob ",
        # turn into a generic token to eliminate special
        # cases and make processing easier
        "\\}": "\\cb ",
        # put a backslash in front of to eliminate special cases and
        # make processing easier
        "{": "\\{",
        # put a backslash in front of to eliminate special cases and
        # make processing easier
        "}": "\\}",
        # this is for older RTF
        r'\\$': '\\par ',
    }
    self.__replace_spchar = MReplace(SIMPLE_RPL)
    self.__ms_hex_exp = re.compile(r"\\\'([0-9a-fA-F]{2})") #r"\\\'(..)"
    self.__utf_exp = re.compile(r"\\u(-?\d{3,6}) {0,1}") #modify this
    #self.__utf_exp = re.compile(r"^\\u(-?\d{3,6})")
    #add \n in split for whole file reading
    #self.__splitexp = re.compile(r"(\\[\\{}]|{|}|\n|\\[^\s\\{}&]+(?:\s)?)")
    #why keep backslash whereas \ is replaced before?
    self.__splitexp = re.compile(r"(\\[{}]|\n|\\[^\s\\{}&]+(?:[ \t\r\f\v])?)")
    #self.__par_exp = re.compile(r'\\$')
    #self.__remove_line = re.compile(r'\n+')
    #self.__mixed_exp = re.compile(r"(\\[a-zA-Z]+\d+)(\D+)")
    ##self.num_exp = re.compile(r"(\*|:|[a-zA-Z]+)(.*)")
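A small self-contained check of the tokenizer regexes added above (standalone names, not the class attributes): \u control words may carry a negative, sign-wrapped code point and an optional trailing space, and the split expression keeps each control word as a single token.

import re

utf_exp = re.compile(r"\\u(-?\d{3,6}) ?")
splitexp = re.compile(r"(\\[{}]|\n|\\[^\s\\{}&]+(?:[ \t\r\f\v])?)")

sample = r"\uc1\u-3913 ?text"
code = int(utf_exp.search(sample).group(1))
if code < 0:
    code += 65536              # RTF stores code points above 0x7FFF as negatives
print(hex(code))               # 0xf0b7
print([t for t in splitexp.split(r"\b bold\b0") if t.strip()])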
self.__splitexp = re.compile(r"(\\[{}]|\n|\\[^\s\\{}&]+(?:[ \t\r\f\v])?)")
header.write(pack('>I', 0xFDE9))
header.write(iana2mobi(str(self._oeb.metadata.language[0])))
header.write(pack('>I', 0x4e4))
header.write(pack('>I', 0xFFFFFFFF))
def _generate_index(self):
    self._oeb.log('Generating INDX ...')
    self._primary_index_record = None
text = text.encode('utf-8')
text = text.encode('cp1252','replace')
def _clean_text_value(self, text):
    if text is not None and text.strip():
        text = text.strip()
        if not isinstance(text, unicode):
            text = text.decode('utf-8', 'replace')
        text = text.encode('utf-8')
    else:
        text = "(none)".encode('utf-8')
    return text
text = "(none)".encode('utf-8')
text = text.encode('cp1252')
else:
    self.fetchEXTHFields()
self.have_exth = True
self.fetchEXTHFields()
def __init__(self, stream):
    self.stream = stream
    data = self.data = StreamSlicer(stream)
    self.type = data[60:68]
title_offset, = struct.unpack('>L', self.record0[0x54:0x58])
title_length, = struct.unpack('>L', self.record0[0x58:0x5c])
title_in_file, = struct.unpack('%ds' % (title_length),
    self.record0[title_offset:title_offset + title_length])
title_offset, = unpack('>L', self.record0[0x54:0x58])
title_length, = unpack('>L', self.record0[0x58:0x5c])
title_in_file, = unpack('%ds' % (title_length),
    self.record0[title_offset:title_offset + title_length])
def create_exth(self, new_title=None, exth=None):
    # Add an EXTH block to record 0, rewrite the stream
    # self.hexdump(self.record0)
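The header parsing above follows a common pattern: fixed-offset, big-endian fields unpacked out of record 0. A hedged, synthetic round-trip (offsets 0x54/0x58 taken from the snippet above; the record bytes themselves are invented):

from struct import pack, unpack

title = b'Example Title'
record0 = bytearray(0x60 + len(title))
record0[0x54:0x58] = pack('>L', 0x60)         # full-name offset
record0[0x58:0x5c] = pack('>L', len(title))   # full-name length
record0[0x60:] = title

title_offset, = unpack('>L', record0[0x54:0x58])
title_length, = unpack('>L', record0[0x58:0x5c])
print(record0[title_offset:title_offset + title_length].decode('utf-8'))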
offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.data[78+i*8:78+i*8+8])
offset, a1,a2,a3,a4 = unpack('>LBBBB', self.data[78+i*8:78+i*8+8])
def get_pdbrecords(self):
    pdbrecords = []
    for i in xrange(self.nrecs):
        offset, a1,a2,a3,a4 = struct.unpack('>LBBBB', self.data[78+i*8:78+i*8+8])
        flags, val = a1, a2<<16|a3<<8|a4
        pdbrecords.append( [offset, flags, val] )
    return pdbrecords
return self.javascript('calculate_bookmark(%d)'%(self.ypos+25), 'string')
elem = self.find_bookmark_element()
if elem is None or self.element_ypos(elem) < 100:
    print elem, self.element_ypos(elem)
    bm = 'body|%f'%(float(self.ypos)/(self.height*0.7))
else:
    bm = unicode(elem.evaluateJavaScript(
        'calculate_bookmark(%d, this)'%self.ypos).toString())
    if not bm:
        bm = 'body|%f'%(float(self.ypos)/(self.height*0.7))
return bm
def bookmark(self):
    return self.javascript('calculate_bookmark(%d)'%(self.ypos+25), 'string')
for i,c in enumerate(self.model().column_map):
    m[c] = self.columnWidth(i)
cmap = getattr(self.model(), 'column_map', None)
if cmap is not None:
    for i,c in enumerate(cmap):
        m[c] = self.columnWidth(i)
def write_settings(self):
    m = dynamic[self.__class__.__name__+'column width map']
    if m is None:
        m = {}
    for i,c in enumerate(self.model().column_map):
        m[c] = self.columnWidth(i)
    dynamic[self.__class__.__name__+'column width map'] = m
    self.cw = m
targetpath = sanitize_file_name(targetpath)
components = list(os.path.split(targetpath))
components[-1] = sanitize_file_name(components[-1])
targetpath = os.sep.join(components)
def _extract_member(self, member, targetpath, pwd):
    """Extract the ZipInfo object 'member' to a physical
       file on the path targetpath.
    """
    # build the destination pathname, replacing
    # forward slashes to platform specific separators.
    if targetpath[-1:] == "/":
        targetpath = targetpath[:-1]
if val <= 127:
if val == 160:
    buf.write('\\~')
elif val <= 127:
def txt2rtf(text):
    if not isinstance(text, unicode):
        return text
    buf = cStringIO.StringIO()
    for x in text:
        val = ord(x)
        if val <= 127:
            buf.write(x)
        else:
            repl = ascii_text(x)
            c = r'\uc{2}\u{0:d}{1}'.format(val, repl, len(repl))
            buf.write(c)
    return buf.getvalue()
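txt2rtf relies on the \ucN\u<code> convention: \ucN tells an RTF reader how many fallback characters follow each \u token, so the ASCII replacement written after the code point is skipped by conforming readers. A minimal stand-alone version of the same idea (simplified: a single fixed fallback instead of calibre's ascii_text()):

def escape_for_rtf(text, fallback='?'):
    out = []
    for ch in text:
        cp = ord(ch)
        if cp <= 127:
            out.append(ch)
        else:
            if cp > 32767:
                cp -= 65536    # \u arguments are signed 16-bit values
            out.append('\\uc%d\\u%d%s' % (len(fallback), cp, fallback))
    return ''.join(out)

print(escape_for_rtf('caf\xe9'))   # caf\uc1\u233?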
self.parent.custcols[self.parent.db.field_metadata.custom_field_prefix+col] = {
self.parent.custcols[key] = {
def accept(self):
    col = unicode(self.column_name_box.text()).lower()
    if not col:
        return self.simple_error('', _('No lookup name was provided'))
    if not col.isalnum() or not col[0].isalpha():
        return self.simple_error('', _('The label must contain only letters and digits, and start with a letter'))
    col_heading = unicode(self.column_heading_box.text())
    col_type = self.column_types[self.column_type_box.currentIndex()]['datatype']
    if col_type == '*text':
        col_type = 'text'
        is_multiple = True
    else:
        is_multiple = False
    if not col_heading:
        return self.simple_error('', _('No column heading was provided'))
    bad_col = False
    if col in self.parent.custcols:
        if not self.editing_col or self.parent.custcols[col]['colnum'] != self.orig_column_number:
            bad_col = True
    if bad_col:
        return self.simple_error('', _('The lookup name %s is already used')%col)
    bad_head = False
    for t in self.parent.custcols:
        if self.parent.custcols[t]['name'] == col_heading:
            if not self.editing_col or self.parent.custcols[t]['colnum'] != self.orig_column_number:
                bad_head = True
    for t in self.standard_colheads:
        if self.standard_colheads[t] == col_heading:
            bad_head = True
    if bad_head:
        return self.simple_error('', _('The heading %s is already used')%col_heading)
    if ':' in col or ' ' in col or col.lower() != col:
        return self.simple_error('', _('The lookup name must be lower case and cannot contain ":"s or spaces'))
item.setData(Qt.UserRole, QVariant(col))
item.setData(Qt.UserRole, QVariant(key))
lb_added.sort_name.set("%s %04f" % (metadata.series, metadata.series_index))
lb_added.sort_name.set("%s %s" % (metadata.series, series_index))
def _update_iTunes_metadata(self, metadata, db_added, lb_added, this_book):
    '''
    '''
    if DEBUG:
        self.log.info(" ITUNES._update_iTunes_metadata()")
db_added.sort_name.set("%s %04f" % (metadata.series, metadata.series_index))
db_added.sort_name.set("%s %s" % (metadata.series, series_index))
lb_added.SortName = "%s %04f" % (metadata.series, metadata.series_index)
lb_added.SortName = "%s %s" % (metadata.series, series_index)
db_added.SortName = "%s %04f" % (metadata.series, metadata.series_index)
db_added.SortName = "%s %s" % (metadata.series, series_index)
return self.stream.read(end - start)
try:
    return self.stream.read(end - start)
except OverflowError:
    return self.stream.read(os.stat(self.stream.name).st_size - start)
def section_data(self, number):
    start = self.section_offset(number)
    if number == self.num_sections -1:
        end = os.stat(self.stream.name).st_size
    else:
        end = self.section_offset(number + 1)
    self.stream.seek(start)
    return self.stream.read(end - start)
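section_data illustrates the PDB layout: the last section runs to end-of-file, every other section to the next section's offset. A toy model with invented offsets:

import io

offsets = [0, 4, 10]
blob = io.BytesIO(b'AAAABBBBBBCC')
file_size = len(blob.getvalue())

def section_data(number):
    start = offsets[number]
    end = file_size if number == len(offsets) - 1 else offsets[number + 1]
    blob.seek(start)
    return blob.read(end - start)

print([section_data(i) for i in range(3)])   # [b'AAAA', b'BBBBBB', b'CC']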
'indian_express', 'india_today']
'indian_express', 'india_today', 'livemint']
def iterate_over_builtin_recipe_files():
    exclude = ['craigslist', 'iht', 'outlook_india', 'toronto_sun',
        'indian_express', 'india_today']
    d = os.path.dirname
    base = os.path.join(d(d(d(d(d(d(os.path.abspath(__file__))))))), 'resources', 'recipes')
    for x in os.walk(base):
        for f in x[-1]:
            fbase, ext = os.path.splitext(f)
            if ext != '.recipe' or fbase in exclude:
                continue
            f = os.path.join(x[0], f)
            rid = os.path.splitext(os.path.relpath(f, base).replace(os.sep, '/'))[0]
            yield rid, os.path.join(x[0], f)
data = self.GetFile(path)
try:
    data = self.GetFile(path)
except:
    self.log.exception('Failed to extract %s from CHM, ignoring'%path)
    continue
def ExtractFiles(self, output_dir=os.getcwdu()):
    for path in self.Contents():
        lpath = os.path.join(output_dir, path)
        self._ensure_dir(lpath)
        data = self.GetFile(path)
        if lpath.find(';') != -1:
            # fix file names with ";<junk>" at the end, see _reformat()
            lpath = lpath.split(';')[0]
        try:
            with open(lpath, 'wb') as f:
                if guess_mimetype(path)[0] == ('text/html'):
                    data = self._reformat(data)
                f.write(data)
        except:
            if iswindows and len(lpath) > 250:
                self.log.warn('%r filename too long, skipping'%path)
                continue
            raise
    self._extracted = True
    files = os.listdir(output_dir)
    if self.hhc_path not in files:
        for f in files:
            if f.lower() == self.hhc_path.lower():
                self.hhc_path = f
                break
    if self.hhc_path not in files and files:
        self.hhc_path = files[0]
def set_search_string(self, txt, store_in_history=False):
def set_search_string(self, txt, store_in_history=False, emit_changed=True):
def set_search_string(self, txt, store_in_history=False):
    self.setFocus(Qt.OtherFocusReason)
    if not txt:
        self.clear()
    else:
        self.normalize_state()
        self.setEditText(txt)
        self.line_edit.end(False)
        self.changed.emit()
        self._do_search(store_in_history=store_in_history)
    self.focus_to_library.emit()
self.changed.emit()
if emit_changed:
    self.changed.emit()
self.search_box.set_search_string(u'search:"%s"' % qname)
self.search_box.set_search_string(u'search:"%s"' % qname, emit_changed=False)
def saved_search_selected(self, qname):
    qname = unicode(qname)
    if qname is None or not qname.strip():
        self.search_box.clear()
        return
    if not saved_searches().lookup(qname):
        self.search_box.clear()
        self.setEditText(qname)
        return
    self.search_box.set_search_string(u'search:"%s"' % qname)
    self.setEditText(qname)
    self.setToolTip(saved_searches().lookup(qname))
if href in self._images:
    index = self._images[href] - 1
    exth.write(pack('>III', 0xc9, 0x0c, index))
    exth.write(pack('>III', 0xcb, 0x0c, 0))
    nrecs += 2
    index = self._add_thumbnail(item)
    if index is not None:
        exth.write(pack('>III', 0xca, 0x0c, index - 1))
        nrecs += 1
index = self._images[href] - 1
exth.write(pack('>III', 0xc9, 0x0c, index))
exth.write(pack('>III', 0xcb, 0x0c, 0))
nrecs += 2
index = self._add_thumbnail(item)
if index is not None:
    exth.write(pack('>III', 0xca, 0x0c, index - 1))
    nrecs += 1
def _build_exth(self):
    oeb = self._oeb
    exth = StringIO()
    nrecs = 0
    for term in oeb.metadata:
        if term not in EXTH_CODES:
            continue
        code = EXTH_CODES[term]
        items = oeb.metadata[term]
        if term == 'creator':
            if self._prefer_author_sort:
                creators = [unicode(c.file_as or c) for c in items]
            else:
                creators = [unicode(c) for c in items]
            items = ['; '.join(creators)]
        for item in items:
            data = self.COLLAPSE_RE.sub(' ', unicode(item))
            if term == 'identifier':
                if data.lower().startswith('urn:isbn:'):
                    data = data[9:]
                elif item.scheme.lower() == 'isbn':
                    pass
                else:
                    continue
            data = data.encode('utf-8')
            exth.write(pack('>II', code, len(data) + 8))
            exth.write(data)
            nrecs += 1
    if term == 'rights' :
        rights = unicode(oeb.metadata.rights[0]).encode('utf-8')
        exth.write(pack('>II', EXTH_CODES['rights'], len(rights) + 8))
        exth.write(rights)
header.write(pack('>I', 0x4e4))
header.write(pack('>I', 0xFFFFFFFF))
header.write(pack('>I', 0xFDE9))
header.write(iana2mobi(str(self._oeb.metadata.language[0])))
def _generate_index(self):
    self._oeb.log('Generating INDX ...')
    self._primary_index_record = None
text = text.encode('cp1252','replace')
text = text.encode('ascii','replace')
def _clean_text_value(self, text):
    if not text:
        text = u'(none)'
    text = text.strip()
    if not isinstance(text, unicode):
        text = text.decode('utf-8', 'replace')
    text = text.encode('cp1252','replace')
    return text
def _write_subchapter_node(self, indxt, indices, index, offset, length, count):
    if self.opts.verbose > 2:
        pass
    pos = 0xc0 + indxt.tell()
    indices.write(pack('>H', pos))
    name = "%04X"%count
    indxt.write(chr(len(name)) + name)
    indxt.write(INDXT['subchapter'])
    indxt.write(decint(offset, DECINT_FORWARD))
    indxt.write(decint(length, DECINT_FORWARD))
    indxt.write(decint(self._ctoc_map[index]['titleOffset'], DECINT_FORWARD))
    indxt.write(decint(0, DECINT_FORWARD))
    indxt.write(decint(0xb, DECINT_FORWARD))
def _write_chapter_node(self, indxt, indices, index, offset, length, count):
    # Writes an INDX1 NCXEntry of entryType 0x0F - chapter
    if self.opts.verbose > 2:
        # *** GR: Turn this off while I'm developing my code
        #self._oeb.log.debug('Writing TOC node to IDXT:', node.title, 'href:', node.href)
        pass
ans = cmp(self._data[x][9].lower(), self._data[y][9].lower())
ans = cmp(self._data[x][sidx].lower(), self._data[y][sidx].lower())
def seriescmp(self, x, y):
    try:
        ans = cmp(self._data[x][9].lower(), self._data[y][9].lower())
    except AttributeError: # Some entries may be None
        ans = cmp(self._data[x][9], self._data[y][9])
    if ans != 0:
        return ans
    return cmp(self._data[x][10], self._data[y][10])
ans = cmp(self._data[x][9], self._data[y][9])
ans = cmp(self._data[x][sidx], self._data[y][sidx])
return cmp(self._data[x][10], self._data[y][10])
sidx = self.FIELD_MAP['series_index']
return cmp(self._data[x][sidx], self._data[y][sidx])
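The point of the FIELD_MAP change above, in miniature: deriving row indices from a field map instead of hard-coding 9 and 10 means a schema reordering cannot silently break the sort. A sketch with an invented layout (calibre's real FIELD_MAP is far larger):

FIELD_MAP = {'title': 0, 'series': 1, 'series_index': 2}   # illustrative layout
rows = {1: ('B', 'saga', 2.0), 2: ('A', 'saga', 1.0)}

def series_sort_key(book_id):
    series = rows[book_id][FIELD_MAP['series']] or ''       # entries may be None
    return (series.lower(), rows[book_id][FIELD_MAP['series_index']])

print(sorted(rows, key=series_sort_key))   # [2, 1]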
self.dirty_books_referencing('publisher', id, commit=False)
self.dirty_books_referencing('publisher', old_id, commit=False)
def delete_publisher_using_id(self, old_id):
    self.dirty_books_referencing('publisher', id, commit=False)
    self.conn.execute('''DELETE FROM books_publishers_link WHERE publisher=?''', (old_id,))
    self.conn.execute('DELETE FROM publishers WHERE id=?', (old_id,))
    self.conn.commit()
return ContentType
return ContentID
def contentid_from_path(self, path, ContentType):
    if ContentType == 6:
        ContentID = os.path.splitext(path)[0]
        # Remove the prefix on the file.  it could be either
        ContentID = ContentID.replace(self._main_prefix, '')
        if self._card_a_prefix is not None:
            ContentID = ContentID.replace(self._card_a_prefix, '')
    else: # ContentType = 16
        ContentID = path
        ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/")
        if self._card_a_prefix is not None:
            ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/")
    ContentID = ContentID.replace("\\", '/')
    return ContentType
if tag.tag in ('country-region', 'place', 'placetype', 'placename', 'state', 'city', 'street', 'address', 'content', 'form'):
if tag.tag and barename(tag.tag.lower()) in \
    ('country-region', 'place', 'placetype', 'placename',
        'state', 'city', 'street', 'address', 'content', 'form'):
def upshift_markup(self, root):
    self.log.debug('Converting style information to CSS...')
    size_map = {
        'xx-small': '0.5',
        'x-small': '1',
        'small': '2',
        'medium': '3',
        'large': '4',
        'x-large': '5',
        'xx-large': '6',
    }
    mobi_version = self.book_header.mobi_version
    for x in root.xpath('//ncx'):
        x.getparent().remove(x)
    for i, tag in enumerate(root.iter(etree.Element)):
        tag.attrib.pop('xmlns', '')
        for x in tag.attrib:
            if ':' in x:
                del tag.attrib[x]
        if tag.tag in ('country-region', 'place', 'placetype', 'placename',
                'state', 'city', 'street', 'address', 'content', 'form'):
            tag.tag = 'div' if tag.tag in ('content', 'form') else 'span'
            for key in tag.attrib.keys():
                tag.attrib.pop(key)
            continue
        styles, attrib = [], tag.attrib
        if attrib.has_key('style'):
            style = attrib.pop('style').strip()
            if style:
                styles.append(style)
        if attrib.has_key('height'):
            height = attrib.pop('height').strip()
            if height and '<' not in height and '>' not in height and \
                    re.search(r'\d+', height):
                if tag.tag in ('table', 'td', 'tr'):
                    pass
                elif tag.tag == 'img':
                    tag.set('height', height)
                else:
                    styles.append('margin-top: %s' % self.ensure_unit(height))
        if attrib.has_key('width'):
            width = attrib.pop('width').strip()
            if width and re.search(r'\d+', width):
                if tag.tag in ('table', 'td', 'tr'):
                    pass
                elif tag.tag == 'img':
                    tag.set('width', width)
                else:
                    styles.append('text-indent: %s' % self.ensure_unit(width))
                    if width.startswith('-'):
                        styles.append('margin-left: %s' % self.ensure_unit(width[1:]))
        if attrib.has_key('align'):
            align = attrib.pop('align').strip()
            if align:
                align = align.lower()
                if align == 'baseline':
                    styles.append('vertical-align: '+align)
                else:
                    styles.append('text-align: %s' % align)
        if tag.tag == 'hr':
            if mobi_version == 1:
                tag.tag = 'div'
                styles.append('page-break-before: always')
                styles.append('display: block')
                styles.append('margin: 0')
        elif tag.tag == 'i':
            tag.tag = 'span'
            tag.attrib['class'] = 'italic'
        elif tag.tag == 'b':
            tag.tag = 'span'
            tag.attrib['class'] = 'bold'
        elif tag.tag == 'font':
            sz = tag.get('size', '').lower()
            try:
                float(sz)
            except ValueError:
                if sz in size_map.keys():
                    attrib['size'] = size_map[sz]
        elif tag.tag == 'img':
            recindex = None
            for attr in self.IMAGE_ATTRS:
                recindex = attrib.pop(attr, None) or recindex
            if recindex is not None:
                try:
                    recindex = '%05d'%int(recindex)
                except:
                    pass
                attrib['src'] = 'images/%s.jpg' % recindex
            for attr in ('width', 'height'):
                if attr in attrib:
                    val = attrib[attr]
                    if val.lower().endswith('em'):
                        try:
                            nval = float(val[:-2])
                            nval *= 16 * (168.451/72) # Assume this was set using the Kindle profile
                            attrib[attr] = "%dpx"%int(nval)
                        except:
                            del attrib[attr]
                    elif val.lower().endswith('%'):
                        del attrib[attr]
        elif tag.tag == 'pre':
            if not tag.text:
                tag.tag = 'div'
else:
    icon = I('news/%s.png'%self.urn[8:])
    if os.path.exists(icon):
        self.icon = QVariant(QIcon(icon))
    else:
        self.icon = default_icon
def __init__(self, urn, title, default_icon, custom_icon, builtin, custom,
        scheduler_config, parent):
    NewsTreeItem.__init__(self, builtin, custom, scheduler_config, parent)
    self.urn, self.title = urn, title
    if 'custom:' in self.urn:
        self.icon = custom_icon
    else:
        icon = I('news/%s.png'%self.urn[8:])
        if os.path.exists(icon):
            self.icon = QVariant(QIcon(icon))
        else:
            self.icon = default_icon
def update_device_database_collections(self, booklists, collections_attributes):
def update_device_database_collections(self, booklists, collections_attributes, oncard):
def update_device_database_collections(self, booklists, collections_attributes):
connection = sqlite.connect(self._main_prefix + '.kobo/KoboReader.sqlite')
cursor = connection.cursor()
query = 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null'
try:
    cursor.execute(query)
except:
    debug_print('Database Exception: Unable to reset Im_Reading list')
    raise
else:
    connection.commit()
def update_device_database_collections(self, booklists, collections_attributes):
self.update_device_database_collections(blist, collections)
if i == 0:
    oncard = 'main'
else:
    oncard = 'carda'
self.update_device_database_collections(blist, collections, oncard)
def sync_booklists(self, booklists, end_session=True):
self.save_state()
def accept(self):
    try:
        if self.formats_changed:
            self.sync_formats()
        title = unicode(self.title.text())
        self.db.set_title(self.id, title, notify=False)
        au = unicode(self.authors.text())
        if au:
            self.db.set_authors(self.id, string_to_authors(au), notify=False)
        aus = unicode(self.author_sort.text())
        if aus:
            self.db.set_author_sort(self.id, aus, notify=False)
        self.db.set_isbn(self.id, re.sub(r'[^0-9a-zA-Z]', '', unicode(self.isbn.text())), notify=False)
        self.db.set_rating(self.id, 2*self.rating.value(), notify=False)
        self.db.set_publisher(self.id, unicode(self.publisher.currentText()), notify=False)
        self.db.set_tags(self.id, [x.strip() for x in unicode(self.tags.text()).split(',')], notify=False)
        self.db.set_series(self.id, unicode(self.series.currentText()).strip(), notify=False)
        self.db.set_series_index(self.id, self.series_index.value(), notify=False)
        self.db.set_comment(self.id, unicode(self.comments.toPlainText()), notify=False)
        d = self.pubdate.date()
        d = qt_to_dt(d)
        self.db.set_pubdate(self.id, d, notify=False)
        d = self.date.date()
        d = qt_to_dt(d)
        if d.date() != self.orig_timestamp.date():
            self.db.set_timestamp(self.id, d, notify=False)
this_title['author_sort'] = record['author_sort']
this_title['author_sort'] = record['author_sort'] if len(record['author_sort']) \
    else this_title['author']
def fetchBooksByTitle(self):
if not multiple_authors:
    unique_authors.append((current_author[0], current_author[1].title(),
        books_by_current_author))
if False and self.verbose:
else:
    if (current_author == author and len(authors) > 1) or not multiple_authors:
        unique_authors.append((current_author[0], current_author[1].title(),
            books_by_current_author))
        if self.verbose:
def fetchBooksByAuthor(self):
    # Generate a list of titles sorted by author from the database
if event.button() == Qt.LeftButton:
if event.button() == Qt.LeftButton and not self.event_has_mods():
def mousePressEvent(self, event):
    if event.button() == Qt.LeftButton:
        self.drag_start_pos = event.pos()
    return QTableView.mousePressEvent(self, event)
self.drag_start_pos is None or \
QApplication.keyboardModifiers() != Qt.NoModifier or \
def mouseMoveEvent(self, event):
    if not (event.buttons() & Qt.LeftButton) or \
            self.drag_start_pos is None or \
            QApplication.keyboardModifiers() != Qt.NoModifier or \
            (event.pos() - self.drag_start_pos).manhattanLength() \
                < QApplication.startDragDistance():
        return QTableView.mouseMoveEvent(self, event)
return QTableView.mouseMoveEvent(self, event)
return
write_dirtied()
write_dirtied(db)
def do_add_empty(db, title, authors, isbn):
    from calibre.ebooks.metadata import MetaInformation, string_to_authors
    mi = MetaInformation(None)
    if title is not None:
        mi.title = title
    if authors:
        mi.authors = string_to_authors(authors)
    if isbn:
        mi.isbn = isbn
    db.import_book(mi, [])
    write_dirtied()
    send_message()
write_dirtied()
write_dirtied(db)
def do_set_metadata(db, id, stream):
    mi = OPF(stream).to_book_metadata()
    db.set_metadata(id, mi)
    db.clean()
    do_show_metadata(db, id, False)
    write_dirtied()
    send_message()
from PyQt4.Qt import QImage, Qt
from calibre.gui2 import pixmap_to_data
try:
    from PIL import Image as PILImage
    PILImage
except ImportError:
    import Image as PILImage
from calibre.utils.magick.draw import Image
def rescale(self, qt=True):
    from PyQt4.Qt import QImage, Qt
    from calibre.gui2 import pixmap_to_data
    try:
        from PIL import Image as PILImage
        PILImage
    except ImportError:
        import Image as PILImage
if qt:
    img = QImage(10, 10, QImage.Format_ARGB32_Premultiplied)
    try:
        if not img.loadFromData(raw):
            continue
    except:
        continue
    width, height = img.width(), img.height()
else:
    f = cStringIO.StringIO(raw)
    try:
        im = PILImage.open(f)
    except IOError:
        continue
    width, height = im.size
try:
    img = Image()
    img.load(raw)
except:
    continue
width, height = img.size
data = None
if qt:
    img = img.scaled(new_width, new_height, Qt.IgnoreAspectRatio,
        Qt.SmoothTransformation)
    data = pixmap_to_data(img, format=ext)
try:
    img.size = (new_width, new_height)
    data = img.export(ext.lower())
except:
    self.log.exception('Failed to rescale image')
try:
    im = im.resize((int(new_width), int(new_height)), PILImage.ANTIALIAS)
    of = cStringIO.StringIO()
    im.convert('RGB').save(of, ext)
    data = of.getvalue()
except:
    self.log.exception('Failed to rescale image')
if data is not None:
Not + OneOrMore(~oneOf("and or") + And)
Not + OneOrMore(~oneOf("and or", caseless=True) + And)
def __init__(self, test=False):
    self._tests_failed = False
    # Define a token
    locations = map(lambda x : CaselessLiteral(x)+Suppress(':'), self.LOCATIONS)
    location = NoMatch()
    for l in locations:
        location |= l
    location = Optional(location, default='all')
    word_query = CharsNotIn(string.whitespace + '()')
    quoted_query = Suppress('"')+CharsNotIn('"')+Suppress('"')
    query = quoted_query | word_query
    Token = Group(location + query).setResultsName('token')
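The oneOf fix above is about case sensitivity: oneOf("and or") rejects only lowercase operators, so an uppercase "OR" would be consumed as one more search term. A tiny pyparsing sketch of the repaired behaviour (grammar heavily simplified relative to the class above):

from pyparsing import OneOrMore, Word, alphanums, oneOf

# a bare term is any word that is not the boolean operator itself
term = ~oneOf("and or", caseless=True) + Word(alphanums + ':')
print(OneOrMore(term).parseString('tag:txt tolstoy'))   # ['tag:txt', 'tolstoy']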
'title:Dysfunction or author:Laurie': set([348, 444]),
'title:Dysfunction OR author:Laurie': set([348, 444]),
def universal_set(self):
    '''
    Should return the set of all matches.
    '''
    return set([])
'(tag:txt or tag:pdf) and author:Tolstoy': set([55, 56]),
'(tag:txt OR tag:pdf) and author:Tolstoy': set([55, 56]),
'tag:txt and not tolstoy': set([33, 258, 354, 305, 242, 154]),
'tag:txt AND NOT tolstoy': set([33, 258, 354, 305, 242, 154]),
if True:
    rec_count = len(self._ctoc_records)
    self._oeb.logger.info("  CNCX utilization: %d %s %.0f%% full" % \
        (rec_count + 1, 'records, last record' if rec_count else 'record,',
            len(self._ctoc.getvalue())/655) )
return align_block(self._ctoc.getvalue())
self._ctoc.write('\0')
ctoc = self._ctoc.getvalue()
rec_count = len(self._ctoc_records)
self._oeb.logger.info("  CNCX utilization: %d %s %.0f%% full" % \
    (rec_count + 1, 'records, last record' if rec_count else 'record,',
        len(ctoc)/655) )
return align_block(ctoc)
def _generate_ctoc(self):
    # Generate the compiled TOC strings
    # Each node has 1-4 CTOC entries:
    #   Periodical (0xDF)
    #       title, class
    #   Section (0xFF)
    #       title, class
    #   Article (0x3F)
    #       title, class, description, author
    #   Chapter (0x0F)
    #       title, class
    #   nb: Chapters don't actually have @class, so we synthesize it
    #   in reader._toc_from_navpoint
if self.cover_changed and self.cover_data is not None:
    self.db.set_cover(self.id, self.cover_data)
if self.cover_changed:
    if self.cover_data is not None:
        self.db.set_cover(self.id, self.cover_data)
    else:
        self.db.remove_cover(self.id)
def accept(self):
    try:
        if self.formats_changed:
            self.sync_formats()
        title = unicode(self.title.text())
        self.db.set_title(self.id, title, notify=False)
        au = unicode(self.authors.text())
        if au:
            self.db.set_authors(self.id, string_to_authors(au), notify=False)
        aus = unicode(self.author_sort.text())
        if aus:
            self.db.set_author_sort(self.id, aus, notify=False)
        self.db.set_isbn(self.id, re.sub(r'[^0-9a-zA-Z]', '', unicode(self.isbn.text())), notify=False)
        self.db.set_rating(self.id, 2*self.rating.value(), notify=False)
        self.db.set_publisher(self.id, qstring_to_unicode(self.publisher.currentText()), notify=False)
        self.db.set_tags(self.id, qstring_to_unicode(self.tags.text()).split(','), notify=False)
        self.db.set_series(self.id, qstring_to_unicode(self.series.currentText()), notify=False)
        self.db.set_series_index(self.id, self.series_index.value(), notify=False)
        self.db.set_comment(self.id, qstring_to_unicode(self.comments.toPlainText()), notify=False)
        d = self.pubdate.date()
        d = datetime(d.year(), d.month(), d.day())
        d = d + self.local_timezone_offset
        self.db.set_pubdate(self.id, d)
        d = self.date.date()
        d = datetime(d.year(), d.month(), d.day())
        d = d + self.local_timezone_offset
        self.db.set_timestamp(self.id, d)
for x in self.scheduler_config.iter_recipes():
    urn = x.get('id')
    if ok(urn):
        factory(NewsItem, scheduled, urn, x.get('title'))
def ok(urn):
    if restrict_to_urns is None:
        return False
    return not restrict_to_urns or urn in restrict_to_urns
if self.opts.connected_kindle and title['id'] in self.bookmarked_books:
if title['read']:
    authorTag.insert(0, NavigableString(self.READ_SYMBOL + "by "))
elif self.opts.connected_kindle and title['id'] in self.bookmarked_books:
def generateHTMLDescriptions(self):
    # Write each title to a separate HTML file in contentdir
    self.updateProgressFullStep("'Descriptions'")
if title['read']:
    authorTag.insert(0, NavigableString(self.READ_SYMBOL + "by "))
else:
    authorTag.insert(0, NavigableString(self.NOT_READ_SYMBOL + "by "))
authorTag.insert(0, NavigableString(self.NOT_READ_SYMBOL + "by "))
WINDOWS_MAIN_MEM   = re.compile('PRS-((700/)|((6|9)00&))')
WINDOWS_CARD_A_MEM = re.compile(r'PRS-((700/\S+:)|((6|9)00_))MS')
WINDOWS_CARD_B_MEM = re.compile(r'PRS-((700/\S+:)|((6|9)00_))SD')
# these cells were truncated at the '#' characters in the dump; the bracket
# classes below are a best-guess reconstruction that adds '#' as a separator
WINDOWS_MAIN_MEM   = re.compile('PRS-((700[#/])|((6|9)00[#&]))')
WINDOWS_CARD_A_MEM = re.compile(r'PRS-((700[/#]\S+:)|((6|9)00[#_]))MS')
WINDOWS_CARD_B_MEM = re.compile(r'PRS-((700[/#]\S+:)|((6|9)00[#_]))SD')
def write_card_prefix(prefix, listid):
    if prefix is not None and hasattr(booklists[listid], 'write'):
        if not os.path.exists(prefix):
            os.makedirs(prefix)
        f = open(prefix + self.__class__.CACHE_XML, 'wb')
        booklists[listid].write(f)
        f.close()
WEIGHTS[_('Comments')] = 2
WEIGHTS[_('Series')] = 3
WEIGHTS[_('Tags')] = 4
WEIGHTS[_('Comments')] = 4
WEIGHTS[_('Series')] = 2
WEIGHTS[_('Tags')] = 3
def mouseReleaseEvent(self, ev):
    self.emit(SIGNAL('mr(int)'), 1)
ans = time.strftime(fmt, t).decode(preferred_encoding, 'replace')
else:
    ans = time.strftime(fmt, t).decode(preferred_encoding, 'replace')
def strftime(fmt, t=None):
    ''' A version of strftime that returns unicode strings and tries to handle dates
    before 1900 '''
    if t is None:
        t = time.localtime()
    if hasattr(t, 'timetuple'):
        t = t.timetuple()
    early_year = t[0] < 1900
    if early_year:
        replacement = 1900 if t[0]%4 == 0 else 1901
        fmt = fmt.replace('%Y', '_early year hack##')
        t = list(t)
        orig_year = t[0]
        t[0] = replacement
    ans = None
    if iswindows:
        if isinstance(fmt, unicode):
            fmt = fmt.encode('mbcs')
        ans = plugins['winutil'][0].strftime(fmt, t)
    ans = time.strftime(fmt, t).decode(preferred_encoding, 'replace')
    if early_year:
        ans = ans.replace('_early year hack##', str(orig_year))
    return ans
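The pre-1900 workaround above is easy to miss in passing: some strftime implementations refuse years below 1900, so the year is swapped for a leap-compatible stand-in, formatted, and the marker substituted back. Isolated and simplified (no Windows branch, plain tuples):

import time

def safe_strftime(fmt, t):
    early, orig_year = t[0] < 1900, t[0]
    if early:
        t = (1900 if t[0] % 4 == 0 else 1901,) + tuple(t[1:])
        fmt = fmt.replace('%Y', '_year_hack_')
    ans = time.strftime(fmt, t)
    return ans.replace('_year_hack_', str(orig_year)) if early else ans

print(safe_strftime('%Y-%m-%d', (1848, 3, 15, 0, 0, 0, 2, 75, 0)))   # 1848-03-15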
self.conflicting_custom_cols[label] = set([m[label]])
self.conflicting_custom_cols[label].add(args)
self.conflicting_custom_cols[label] = []
self.conflicting_custom_cols[label].append(args)
def create_cc_metadata(self):
    self.books.sort(key=itemgetter('timestamp'))
    m = {}
    fields = ('label', 'name', 'datatype', 'is_multiple', 'is_editable', 'display')
    for b in self.books:
        for key in b['mi'].custom_field_keys():
            cfm = b['mi'].metadata_for_field(key)
            args = []
            for x in fields:
                if x in cfm:
                    if x == 'is_multiple':
                        args.append(cfm[x] is not None)
                    else:
                        args.append(cfm[x])
            if len(args) == len(fields):
                # TODO: Do series type columns need special handling?
                label = cfm['label']
                if label in m and args != m[label]:
                    if label not in self.conflicting_custom_cols:
                        self.conflicting_custom_cols[label] = set([m[label]])
                    self.conflicting_custom_cols[label].add(args)
                m[cfm['label']] = args
self.help_state = True
self.help_state = False
def __init__(self, parent=None):
    QComboBox.__init__(self, parent)
    self.normal_background = 'rgb(255, 255, 255, 0%)'
    self.line_edit = SearchLineEdit(self)
    self.setLineEdit(self.line_edit)
    self.connect(self.line_edit, SIGNAL('key_pressed(PyQt_PyObject)'),
        self.key_pressed, Qt.DirectConnection)
    self.connect(self.line_edit, SIGNAL('mouse_released(PyQt_PyObject)'),
        self.mouse_released, Qt.DirectConnection)
    self.setEditable(True)
    self.help_state = True
    self.as_you_type = True
    self.prev_search = ''
    self.timer = None
    self.setInsertPolicy(self.NoInsert)
    self.setMaxCount(self.MAX_COUNT)
    self.setSizeAdjustPolicy(self.AdjustToMinimumContentsLengthWithIcon)
    self.setMinimumContentsLength(25)
    self._in_a_search = False
c = cmp(sort_key(x), sort_key(y))
if isinstance(x, unicode):
    c = cmp(sort_key(x), sort_key(y))
else:
    c = cmp(x, y)
def none_cmp(xx, yy):
    x = xx[1]
    y = yy[1]
    if x is None and y is None:
        # No sort_key needed here, because defaults are ascii
        return cmp(xx[2], yy[2])
    if x is None:
        return 1
    if y is None:
        return -1
    c = cmp(sort_key(x), sort_key(y))
    if c != 0:
        return c
    # same as above -- no sort_key needed here
    return cmp(xx[2], yy[2])
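none_cmp puts None after every real value, with an ascii field as the tiebreak. The same ordering expressed as a Python 3 key function, since cmp comparators are gone there (sort_key here is a stand-in for calibre's ICU collation key):

def sort_key(s):
    return s.lower()    # placeholder for the real collation key

def none_last_key(item):
    _, value, tiebreak = item
    return (value is None, sort_key(value or ''), tiebreak)

items = [(1, 'b', 'x'), (2, None, 'a'), (3, 'A', 'y')]
print(sorted(items, key=none_last_key))   # [(3, 'A', 'y'), (1, 'b', 'x'), (2, None, 'a')]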
self.reporter(self.progressInt, self.progressString)
self.reporter(self.progressInt/100., self.progressString)
def updateProgressFullStep(self, description):
self.reporter(self.progressInt, self.progressString)
self.reporter(self.progressInt/100., self.progressString)
def updateProgressMicroStep(self, description, micro_step_pct):
    step_range = 100/self.total_steps
    self.progressString = description
    self.progressInt = ((self.current_step-1)*100)/self.total_steps + (micro_step_pct*step_range)/100
    self.reporter(self.progressInt, self.progressString)
    return "%d%% %s" % (self.progressInt, self.progressString)
stylizer = Stylizer(item.data, item.href, self.oeb_book, self.opts,
    self.opts.output_profile)
content = unicode(etree.tostring(item.data.find(XHTML('body')), encoding=unicode))
content = unicode(etree.tostring(item.data, encoding=unicode))
def mlize_spine(self):
    output = self.header()
    if 'titlepage' in self.oeb_book.guide:
        href = self.oeb_book.guide['titlepage'].href
        item = self.oeb_book.manifest.hrefs[href]
        if item.spine_position is None:
            stylizer = Stylizer(item.data, item.href, self.oeb_book,
                self.opts, self.opts.output_profile)
            output += self.dump_text(item.data.find(XHTML('body')), stylizer)
            output += '{\\page } '
    for item in self.oeb_book.spine:
        self.log.debug('Converting %s to RTF markup...' % item.href)
        stylizer = Stylizer(item.data, item.href, self.oeb_book, self.opts,
            self.opts.output_profile)
        content = unicode(etree.tostring(item.data.find(XHTML('body')), encoding=unicode))
        content = self.remove_newlines(content)
        output += self.dump_text(etree.fromstring(content), stylizer)
    output += self.footer()
    output = self.insert_images(output)
    output = self.clean_text(output)
output += self.dump_text(etree.fromstring(content), stylizer)
content = etree.fromstring(content)
stylizer = Stylizer(content, item.href, self.oeb_book, self.opts,
    self.opts.output_profile)
output += self.dump_text(content.find(XHTML('body')), stylizer)
path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
try:
    path = os.path.join(self.abspath(id, index_is_id=True), 'cover.jpg')
except:
    return False
def has_cover(self, index, index_is_id=False):
    id = index if index_is_id else self.id(index)
    path = os.path.join(self.library_path, self.path(id, index_is_id=True), 'cover.jpg')
    return os.access(path, os.R_OK)
if isinstance(aus, str):
if isbytestring(aus):
def create_book_entry(self, mi, cover=None, add_duplicates=True):
    self._add_newbook_tag(mi)
    if not add_duplicates and self.has_book(mi):
        return None
    series_index = 1.0 if mi.series_index is None else mi.series_index
    aus = mi.author_sort if mi.author_sort else self.author_sort_from_authors(mi.authors)
    title = mi.title
    if isinstance(aus, str):
        aus = aus.decode(preferred_encoding, 'replace')
    if isinstance(title, str):
        title = title.decode(preferred_encoding)
    obj = self.conn.execute('INSERT INTO books(title, series_index, author_sort) VALUES (?, ?, ?)',
                        (title, series_index, aus))
    id = obj.lastrowid
    self.data.books_added([id], self)
    self.set_path(id, True)
    self.conn.commit()
    if mi.timestamp is None:
        mi.timestamp = utcnow()
    if mi.pubdate is None:
        mi.pubdate = utcnow()
    self.set_metadata(id, mi)
    if cover is not None:
        try:
            self.set_cover(id, cover)
        except:
            traceback.print_exc()
    return id
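The isbytestring swap above generalizes the isinstance(x, str) test. A hedged sketch of the guard (preferred_encoding is hard-coded here; calibre detects it from the locale):

preferred_encoding = 'utf-8'   # assumption for this sketch

def isbytestring(x):
    return isinstance(x, bytes)

def to_unicode(x):
    return x.decode(preferred_encoding, 'replace') if isbytestring(x) else x

print(to_unicode(b'Tolsto\xc3\xaf'), to_unicode(u'already unicode'))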
if isinstance(title, str):
    title = title.decode(preferred_encoding)
if isbytestring(title):
    title = title.decode(preferred_encoding, 'replace')
description = _('Communicate with the PocketBook 602/603/902 reader.')
description = _('Communicate with the PocketBook 602/603/902/903 reader.')
def can_handle(cls, dev, debug=False):
    return dev[3] == 'Elonex' and dev[4] == 'eBook'
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['PB602', 'PB603', 'PB902']
WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['PB602', 'PB603', 'PB902', 'PB903']
s += c
s += prepare_string_for_xml(c)
def __unicode__(self):
    s = u''
    open_containers = collections.deque()
    for c in self.content:
        if isinstance(c, basestring):
            s += c
        elif c is None:
            if open_containers:
                p = open_containers.pop()
                s += u'</%s>'%(p.name,)
        else:
            s += unicode(c)
            if not c.self_closing:
                open_containers.append(c)
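What prepare_string_for_xml guards against above: raw '&', '<' and '>' inside tag text would corrupt the serialized markup. The standard library's escape does the equivalent job:

from xml.sax.saxutils import escape

content = ['a < b & c', None]
print(''.join(escape(c) for c in content if isinstance(c, str)))   # a &lt; b &amp; c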
def strftime(epoch, zone=time.gmtime):
def strftime(epoch, zone=None):
    zone = time.gmtime
    if islinux:
        zone = time.localtime
def strftime(epoch, zone=time.gmtime):
    src = time.strftime("%w, %d %m %Y %H:%M:%S GMT", zone(epoch)).split()
    src[0] = INVERSE_DAY_MAP[int(src[0][:-1])]+','
    src[2] = INVERSE_MONTH_MAP[int(src[2])]
    return ' '.join(src)
book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
if ContentType == '6':
    book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576)
else:
    book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID)
def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus):
    changed = False
    # if path_to_ext(path) in self.FORMATS:
    try:
        lpath = path.partition(self.normalize_path(prefix))[2]
        if lpath.startswith(os.sep):
            lpath = lpath[len(os.sep):]
        lpath = lpath.replace('\\', '/')
return unicode(self.text()).lower() >= unicode(other.text()).lower()
return sort_key(unicode(self.text())) >= sort_key(unicode(other.text()))
def __ge__(self, other):
    return unicode(self.text()).lower() >= unicode(other.text()).lower()
return unicode(self.text()).lower() < unicode(other.text()).lower()
return sort_key(unicode(self.text())) < sort_key(unicode(other.text()))
def __lt__(self, other):
    return unicode(self.text()).lower() < unicode(other.text()).lower()
_('Password to access your calibre library. Username is ')
'Password to access your calibre library. Username is '
def __init__(self, db, opts, embedded=False, show_tracebacks=True, wsgi=False):
    self.is_wsgi = bool(wsgi)
    self.opts = opts
    self.embedded = embedded
    self.state_callback = None
    self.max_cover_width, self.max_cover_height = \
        map(int, self.opts.max_cover.split('x'))
    path = P('content_server')
    self.build_time = fromtimestamp(os.stat(path).st_mtime)
    self.default_cover = open(P('content_server/default_cover.jpg'), 'rb').read()
self.opts.search_text = self.opts.search_text + " " + search_phrase
if self.opts.search_text:
    self.opts.search_text += " " + search_phrase
else:
    self.opts.search_text = search_phrase
def fetchBooksByTitle(self):
mi.formats = self.formats(idx, index_is_id=index_is_id).split(',')
mi.formats = self.formats(idx, index_is_id=index_is_id)
if hasattr(mi.formats, 'split'):
    mi.formats = mi.formats.split(',')
else:
    mi.formats = None
def get_metadata(self, idx, index_is_id=False, get_cover=False):
    '''
    Convenience method to return metadata as a :class:`Metadata` object.
    '''
    self.gm_count += 1
    mi = self.data.get(idx, self.FIELD_MAP['all_metadata'], row_is_id = index_is_id)
    if mi is not None:
        return mi
output += self.dump_text(item.data.find(XHTML('body')), stylizer)
content = unicode(etree.tostring(item.data.find(XHTML('body')), encoding=unicode))
content = self.remove_newlines(content)
output += self.dump_text(etree.fromstring(content), stylizer)
def mlize_spine(self):
    output = self.header()
    if 'titlepage' in self.oeb_book.guide:
        href = self.oeb_book.guide['titlepage'].href
        item = self.oeb_book.manifest.hrefs[href]
        if item.spine_position is None:
            stylizer = Stylizer(item.data, item.href, self.oeb_book,
                self.opts, self.opts.output_profile)
            output += self.dump_text(item.data.find(XHTML('body')), stylizer)
            output += '{\\page } '
    for item in self.oeb_book.spine:
        self.log.debug('Converting %s to RTF markup...' % item.href)
        stylizer = Stylizer(item.data, item.href, self.oeb_book, self.opts,
            self.opts.output_profile)
        output += self.dump_text(item.data.find(XHTML('body')), stylizer)
    output += self.footer()
    output = self.insert_images(output)
    output = self.clean_text(output)
self.anchors[name] = "anchor%03d" % (len(self.anchors) + 1)
self.anchors[name] = "anchor%d" % (len(self.anchors) + 1)
def get_anchor(self, name):
    if not self.anchors.has_key(name):
        self.anchors[name] = "anchor%03d" % (len(self.anchors) + 1)
    return self.anchors.get(name)
anchor = self.get_anchor("%s.%s" % ( outline, ''.join(self.data)))
tail = ''.join(self.data)
anchor = self.get_anchor("%s.%s" % ( outline, tail))
anchor2 = self.get_anchor(tail)
def e_text_h(self, tag, attrs):
    """ Headings end """
    self.writedata()
    level = int(attrs[(TEXTNS,'outline-level')])
    if level > 6:
        level = 6 # Heading levels go only to 6 in XHTML
    if level < 1:
        level = 1
    lev = self.headinglevels[1:level+1]
    outline = '.'.join(map(str,lev) )
    anchor = self.get_anchor("%s.%s" % ( outline, ''.join(self.data)))
    self.opentag('a', {'id': anchor} )
    self.closetag('a', False)
    self.closetag('h%s' % level)
    self.purgedata()