rem | add | context |
---|---|---|
self.__x.delete(0, END) self.__y.delete(0, END) self.__z.delete(0, END) self.__x.insert(0, redstr) self.__y.insert(0, greenstr) self.__z.insert(0, bluestr) | x, y, z = self.__x, self.__y, self.__z xicursor = x.index(INSERT) yicursor = y.index(INSERT) zicursor = z.index(INSERT) x.delete(0, END) y.delete(0, END) z.delete(0, END) x.insert(0, redstr) y.insert(0, greenstr) z.insert(0, bluestr) x.icursor(xicursor) y.icursor(yicursor) z.icursor(zicursor) | def update_yourself(self, red, green, blue): if self.__hexp.get(): redstr, greenstr, bluestr = map(hex, (red, green, blue)) else: redstr, greenstr, bluestr = red, green, blue self.__x.delete(0, END) self.__y.delete(0, END) self.__z.delete(0, END) self.__x.insert(0, redstr) self.__y.insert(0, greenstr) self.__z.insert(0, bluestr) |
except TypeError: pass else: raise TestFailed, 'expected TypeError' | except (AttributeError, TypeError): pass else: raise TestFailed, 'expected TypeError or AttributeError' | def b(): 'my docstring' pass |
except TypeError: pass | except (AttributeError, TypeError): pass | def b(): 'my docstring' pass |
except TypeError: | except (AttributeError, TypeError): | def cantset(obj, name, value): verify(hasattr(obj, name)) # Otherwise it's probably a typo try: setattr(obj, name, value) except TypeError: pass else: raise TestFailed, "shouldn't be able to set %s to %r" % (name, value) try: delattr(obj, name) except TypeError: pass else: raise TestFailed, "shouldn't be able to del %s" % name |
dummy = (0, 0, 0, 0, "NULL") | dummy = (0, 0, 0, 0) | def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__ |
DECOMPOSITION = [""] | def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__ |
if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" | def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__ |
category, combining, bidirectional, mirrored, decomposition | category, combining, bidirectional, mirrored | def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__ |
decomp_data = [""] decomp_index = [0] * len(unicode.chars) for char in unicode.chars: record = unicode.table[char] if record: if record[5]: try: i = decomp_data.index(record[5]) except ValueError: i = len(decomp_data) decomp_data.append(record[5]) else: i = 0 decomp_index[char] = i | def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__ |
print " {%d, %d, %d, %d, %s}," % item | print " {%d, %d, %d, %d}," % item | def maketable(): unicode = UnicodeData(UNICODE_DATA) # extract unicode properties dummy = (0, 0, 0, 0, "NULL") table = [dummy] cache = {0: dummy} index = [0] * len(unicode.chars) DECOMPOSITION = [""] for char in unicode.chars: record = unicode.table[char] if record: # extract database properties category = CATEGORY_NAMES.index(record[2]) combining = int(record[3]) bidirectional = BIDIRECTIONAL_NAMES.index(record[4]) mirrored = record[9] == "Y" if record[5]: decomposition = '"%s"' % record[5] else: decomposition = "NULL" item = ( category, combining, bidirectional, mirrored, decomposition ) # add entry to index and item tables i = cache.get(item) if i is None: cache[item] = i = len(table) table.append(item) index[char] = i # FIXME: we really should compress the decomposition stuff # (see the unidb utilities for one way to do this) FILE = "unicodedata_db.h" sys.stdout = open(FILE, "w") print "/* this file was generated by %s %s */" % (SCRIPT, VERSION) print print "/* a list of unique database records */" print "const _PyUnicode_DatabaseRecord _PyUnicode_Database_Records[] = {" for item in table: print " {%d, %d, %d, %d, %s}," % item print "};" print print "/* string literals */" print "const char *_PyUnicode_CategoryNames[] = {" for name in CATEGORY_NAMES: print " \"%s\"," % name print " NULL" print "};" print "const char *_PyUnicode_BidirectionalNames[] = {" for name in BIDIRECTIONAL_NAMES: print " \"%s\"," % name print " NULL" print "};" # split index table index1, index2, shift = splitbins(index) print "/* index tables used to find the right database record */" print "#define SHIFT", shift Array("index1", index1).dump(sys.stdout) Array("index2", index2).dump(sys.stdout) sys.stdout = sys.__stdout__ |
include_dirs = ['Modules/expat'] | include_dirs = [expatinc] | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
(sys.platform.startswith('linux') and | ((sys.platform.startswith('linux') or sys.platform.startswith('gnu')) and | def finalize_options (self): from distutils import sysconfig |
self.assertRaises(TypeError, setattr, X.x, "offset", 92) self.assertRaises(TypeError, setattr, X.x, "size", 92) | self.assertRaises(AttributeError, setattr, X.x, "offset", 92) self.assertRaises(AttributeError, setattr, X.x, "size", 92) | def test_fields(self): # test the offset and size attributes of Structure/Unoin fields. class X(Structure): _fields_ = [("x", c_int), ("y", c_char)] |
| from Carbon import AE AE.AEInteractWithUser(50000000) | def quitevent(self, theAppleEvent, theReply): from Carbon import AE AE.AEInteractWithUser(50000000) self._quit() |
test_exc('%d', '1', TypeError, "int argument required") test_exc('%g', '1', TypeError, "float argument required") | test_exc('%d', '1', TypeError, "int argument required, not str") test_exc('%g', '1', TypeError, "float argument required, not str") | def test_exc(formatstr, args, exception, excmsg): try: testformat(formatstr, args) except exception, exc: if str(exc) == excmsg: if verbose: print "yes" else: if verbose: print 'no' print 'Unexpected ', exception, ':', repr(str(exc)) except: if verbose: print 'no' print 'Unexpected exception' raise else: raise TestFailed, 'did not get expected exception: %s' % excmsg |
dirs_in_sys_path = {} | _dirs_in_sys_path = {} | def makepath(*paths): dir = os.path.abspath(os.path.join(*paths)) return dir, os.path.normcase(dir) |
if not dirs_in_sys_path.has_key(dircase): | if not _dirs_in_sys_path.has_key(dircase): | def makepath(*paths): dir = os.path.abspath(os.path.join(*paths)) return dir, os.path.normcase(dir) |
dirs_in_sys_path[dircase] = 1 | _dirs_in_sys_path[dircase] = 1 | def makepath(*paths): dir = os.path.abspath(os.path.join(*paths)) return dir, os.path.normcase(dir) |
if not dirs_in_sys_path.has_key(sitedircase): | if not _dirs_in_sys_path.has_key(sitedircase): | def addsitedir(sitedir): sitedir, sitedircase = makepath(sitedir) if not dirs_in_sys_path.has_key(sitedircase): sys.path.append(sitedir) # Add path component try: names = os.listdir(sitedir) except os.error: return names.sort() for name in names: if name[-4:] == endsep + "pth": addpackage(sitedir, name) |
if not dirs_in_sys_path.has_key(dircase) and os.path.exists(dir): | if not _dirs_in_sys_path.has_key(dircase) and os.path.exists(dir): | def addpackage(sitedir, name): fullname = os.path.join(sitedir, name) try: f = open(fullname) except IOError: return while 1: dir = f.readline() if not dir: break if dir[0] == '#': continue if dir.startswith("import"): exec dir continue if dir[-1] == '\n': dir = dir[:-1] dir, dircase = makepath(sitedir, dir) if not dirs_in_sys_path.has_key(dircase) and os.path.exists(dir): sys.path.append(dir) dirs_in_sys_path[dircase] = 1 |
dirs_in_sys_path[dircase] = 1 | _dirs_in_sys_path[dircase] = 1 if reset: _dirs_in_sys_path = None | def addpackage(sitedir, name): fullname = os.path.join(sitedir, name) try: f = open(fullname) except IOError: return while 1: dir = f.readline() if not dir: break if dir[0] == '#': continue if dir.startswith("import"): exec dir continue if dir[-1] == '\n': dir = dir[:-1] dir, dircase = makepath(sitedir, dir) if not dirs_in_sys_path.has_key(dircase) and os.path.exists(dir): sys.path.append(dir) dirs_in_sys_path[dircase] = 1 |
if importer is None: | if importer in (None, True, False): | def get_importer(path_item): """Retrieve a PEP 302 importer for the given path item The returned importer is cached in sys.path_importer_cache if it was newly created by a path hook. If there is no importer, a wrapper around the basic import machinery is returned. This wrapper is never inserted into the importer cache (None is inserted instead). The cache (or part of it) can be cleared manually if a rescan of sys.path_hooks is necessary. """ try: importer = sys.path_importer_cache[path_item] except KeyError: for path_hook in sys.path_hooks: try: importer = path_hook(path_item) break except ImportError: pass else: importer = None sys.path_importer_cache.setdefault(path_item, importer) if importer is None: try: importer = ImpImporter(path_item) except ImportError: pass return importer |
pass | importer = None | def get_importer(path_item): """Retrieve a PEP 302 importer for the given path item The returned importer is cached in sys.path_importer_cache if it was newly created by a path hook. If there is no importer, a wrapper around the basic import machinery is returned. This wrapper is never inserted into the importer cache (None is inserted instead). The cache (or part of it) can be cleared manually if a rescan of sys.path_hooks is necessary. """ try: importer = sys.path_importer_cache[path_item] except KeyError: for path_hook in sys.path_hooks: try: importer = path_hook(path_item) break except ImportError: pass else: importer = None sys.path_importer_cache.setdefault(path_item, importer) if importer is None: try: importer = ImpImporter(path_item) except ImportError: pass return importer |
def cmp(f1, f2): | def cmp(f1, f2, shallow=1): | def cmp(f1, f2): # Compare two files, use the cache if possible. # Return 1 for identical files, 0 for different. # Raise exceptions if either file could not be statted, read, etc. s1, s2 = sig(os.stat(f1)), sig(os.stat(f2)) if s1[0] <> 8 or s2[0] <> 8: # Either is a not a plain file -- always report as different return 0 if s1 == s2: # type, size & mtime match -- report same return 1 if s1[:2] <> s2[:2]: # Types or sizes differ, don't bother # types or sizes differ -- report different return 0 # same type and size -- look in the cache key = (f1, f2) try: cs1, cs2, outcome = cache[key] # cache hit if s1 == cs1 and s2 == cs2: # cached signatures match return outcome # stale cached signature(s) except KeyError: # cache miss pass # really compare outcome = do_cmp(f1, f2) cache[key] = s1, s2, outcome return outcome |
if s1 == s2: | if shallow and s1 == s2: | def cmp(f1, f2): # Compare two files, use the cache if possible. # Return 1 for identical files, 0 for different. # Raise exceptions if either file could not be statted, read, etc. s1, s2 = sig(os.stat(f1)), sig(os.stat(f2)) if s1[0] <> 8 or s2[0] <> 8: # Either is a not a plain file -- always report as different return 0 if s1 == s2: # type, size & mtime match -- report same return 1 if s1[:2] <> s2[:2]: # Types or sizes differ, don't bother # types or sizes differ -- report different return 0 # same type and size -- look in the cache key = (f1, f2) try: cs1, cs2, outcome = cache[key] # cache hit if s1 == cs1 and s2 == cs2: # cached signatures match return outcome # stale cached signature(s) except KeyError: # cache miss pass # really compare outcome = do_cmp(f1, f2) cache[key] = s1, s2, outcome return outcome |
| default_bufsize = 8192 | _s = "def %s(self, *args): return self._sock.%s(*args)\n\n" |
self._mode = mode if bufsize <= 0: if bufsize == 0: bufsize = 1 else: bufsize = 8192 self._rbufsize = bufsize | self._mode = mode if bufsize < 0: bufsize = self.default_bufsize if bufsize == 0: self._rbufsize = 1 elif bufsize == 1: self._rbufsize = self.default_bufsize else: self._rbufsize = bufsize | def __init__(self, sock, mode='rb', bufsize=-1): self._sock = sock self._mode = mode if bufsize <= 0: if bufsize == 0: bufsize = 1 # Unbuffered mode else: bufsize = 8192 self._rbufsize = bufsize self._wbufsize = bufsize self._rbuf = [ ] self._wbuf = [ ] |
buffer = ''.join(self._wbuf) | buffer = "".join(self._wbuf) self._wbuf = [] | def flush(self): if self._wbuf: buffer = ''.join(self._wbuf) self._sock.sendall(buffer) self._wbuf = [ ] |
self._wbuf = [ ] | | def flush(self): if self._wbuf: buffer = ''.join(self._wbuf) self._sock.sendall(buffer) self._wbuf = [ ] |
self._wbuf.append (data) if self._wbufsize == 1: if '\n' in data: self.flush () elif self.__get_wbuf_len() >= self._wbufsize: | data = str(data) if not data: return self._wbuf.append(data) if (self._wbufsize == 0 or self._wbufsize == 1 and '\n' in data or self._get_wbuf_len() >= self._wbufsize): | def write(self, data): self._wbuf.append (data) # A _wbufsize of 1 means we're doing unbuffered IO. # Flush accordingly. if self._wbufsize == 1: if '\n' in data: self.flush () elif self.__get_wbuf_len() >= self._wbufsize: self.flush() |
filter(self._sock.sendall, list) self.flush() def __get_wbuf_len (self): | self._wbuf.extend(filter(None, map(str, list))) if (self._wbufsize <= 1 or self._get_wbuf_len() >= self._wbufsize): self.flush() def _get_wbuf_len(self): | def writelines(self, list): filter(self._sock.sendall, list) self.flush() |
for i in [len(x) for x in self._wbuf]: buf_len += i | for x in self._wbuf: buf_len += len(x) | def __get_wbuf_len (self): buf_len = 0 for i in [len(x) for x in self._wbuf]: buf_len += i return buf_len |
def __get_rbuf_len(self): | def _get_rbuf_len(self): | def __get_rbuf_len(self): buf_len = 0 for i in [len(x) for x in self._rbuf]: buf_len += i return buf_len |
for i in [len(x) for x in self._rbuf]: buf_len += i | for x in self._rbuf: buf_len += len(x) | def __get_rbuf_len(self): buf_len = 0 for i in [len(x) for x in self._rbuf]: buf_len += i return buf_len |
buf_len = self.__get_rbuf_len() while size < 0 or buf_len < size: recv_size = max(self._rbufsize, size - buf_len) data = self._sock.recv(recv_size) if not data: break buf_len += len(data) self._rbuf.append(data) data = ''.join(self._rbuf) self._rbuf = [ ] if buf_len > size and size >= 0: | if size < 0: if self._rbufsize <= 1: recv_size = self.default_bufsize else: recv_size = self._rbufsize while 1: data = self._sock.recv(recv_size) if not data: break self._rbuf.append(data) else: buf_len = self._get_rbuf_len() while buf_len < size: recv_size = max(self._rbufsize, size - buf_len) data = self._sock.recv(recv_size) if not data: break buf_len += len(data) self._rbuf.append(data) data = "".join(self._rbuf) self._rbuf = [] if 0 <= size < buf_len: | def read(self, size=-1): buf_len = self.__get_rbuf_len() while size < 0 or buf_len < size: recv_size = max(self._rbufsize, size - buf_len) data = self._sock.recv(recv_size) if not data: break buf_len += len(data) self._rbuf.append(data) # Clear the rbuf at the end so we're not affected by # an exception during a recv data = ''.join(self._rbuf) self._rbuf = [ ] if buf_len > size and size >= 0: self._rbuf.append(data[size:]) data = data[:size] return data |
index = -1 buf_len = self.__get_rbuf_len() if self._rbuf: index = min([x.find('\n') for x in self._rbuf]) while index < 0 and (size < 0 or buf_len < size): recv_size = max(self._rbufsize, size - buf_len) data = self._sock.recv(recv_size) if not data: break buf_len += len(data) self._rbuf.append(data) index = data.find('\n') data = ''.join(self._rbuf) self._rbuf = [ ] index = data.find('\n') if index >= 0: index += 1 elif buf_len > size: index = size else: index = buf_len self._rbuf.append(data[index:]) data = data[:index] | data_len = 0 for index, x in enumerate(self._rbuf): data_len += len(x) if '\n' in x or 0 <= size <= data_len: index += 1 data = "".join(self._rbuf[:index]) end = data.find('\n') if end < 0: end = len(data) else: end += 1 if 0 <= size < end: end = size data, rest = data[:end], data[end:] if rest: self._rbuf[:index] = [rest] else: del self._rbuf[:index] return data recv_size = self._rbufsize while 1: if size >= 0: recv_size = min(self._rbufsize, size - data_len) x = self._sock.recv(recv_size) if not x: break data_len += len(x) self._rbuf.append(x) if '\n' in x or 0 <= size <= data_len: break data = "".join(self._rbuf) end = data.find('\n') if end < 0: end = len(data) else: end += 1 if 0 <= size < end: end = size data, rest = data[:end], data[end:] if rest: self._rbuf = [rest] else: self._rbuf = [] | def readline(self, size=-1): index = -1 buf_len = self.__get_rbuf_len() if self._rbuf: index = min([x.find('\n') for x in self._rbuf]) while index < 0 and (size < 0 or buf_len < size): recv_size = max(self._rbufsize, size - buf_len) data = self._sock.recv(recv_size) if not data: break buf_len += len(data) self._rbuf.append(data) index = data.find('\n') data = ''.join(self._rbuf) self._rbuf = [ ] index = data.find('\n') if index >= 0: index += 1 elif buf_len > size: index = size else: index = buf_len self._rbuf.append(data[index:]) data = data[:index] return data |
if timer is None: if os.name == 'mac': | if not timer: if _has_res: self.timer = resgetrusage self.dispatcher = self.trace_dispatch self.get_time = _get_time_resource elif os.name == 'mac': | def __init__(self, timer=None, bias=None): self.timings = {} self.cur = None self.cmd = "" self.c_func_name = "" |
self._ssnd_chunk.setpos(pos + 8) | self._ssnd_chunk.seek(pos + 8) | def readframes(self, nframes): if self._ssnd_seek_needed: self._ssnd_chunk.seek(0) dummy = self._ssnd_chunk.read(8) pos = self._soundpos * self._framesize if pos: self._ssnd_chunk.setpos(pos + 8) self._ssnd_seek_needed = 0 if nframes == 0: return '' data = self._ssnd_chunk.read(nframes * self._framesize) if self._convert and data: data = self._convert(data) self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth) return data |
start = min(start, len(lines) - context) | start = max(0, min(start, len(lines) - context)) | def getframeinfo(frame, context=1): """Get information about a frame or traceback object. A tuple of five things is returned: the filename, the line number of the current line, the function name, a list of lines of context from the source code, and the index of the current line within that list. The optional second argument specifies the number of lines of context to return, which are centered around the current line.""" if istraceback(frame): lineno = frame.tb_lineno frame = frame.tb_frame else: lineno = frame.f_lineno if not isframe(frame): raise TypeError('arg is not a frame or traceback object') filename = getsourcefile(frame) or getfile(frame) if context > 0: start = lineno - 1 - context//2 try: lines, lnum = findsource(frame) except IOError: lines = index = None else: start = max(start, 1) start = min(start, len(lines) - context) lines = lines[start:start+context] index = lineno - 1 - start else: lines = index = None return (filename, lineno, frame.f_code.co_name, lines, index) |
print "db.h: found", db_ver, "in", d | if db_setup_debug: print "db.h: found", db_ver, "in", d | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
print "db lib: using", db_ver, dblib if db_setup_debug: print "db: lib dir", dblib_dir, "inc dir", db_incdir | if db_setup_debug: print "db lib: using", db_ver, dblib print "db: lib dir", dblib_dir, "inc dir", db_incdir | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
sqlite_setup_debug = True | sqlite_setup_debug = False | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
print "%s/sqlite3.h: version %s"%(d, sqlite_version) | if sqlite_setup_debug: print "%s/sqlite3.h: version %s"%(d, sqlite_version) | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
if self.license: self.licence = 1 | def parse_command_line (self, args): """Parse the setup script's command line: set any Distribution attributes tied to command-line options, create all command objects, and set their options from the command-line. 'args' must be a list of command-line arguments, most likely 'sys.argv[1:]' (see the 'setup()' function). This list is first processed for "global options" -- options that set attributes of the Distribution instance. Then, it is alternately scanned for Distutils command and options for that command. Each new command terminates the options for the previous command. The allowed options for a command are determined by the 'options' attribute of the command object -- thus, we instantiate (and cache) every command object here, in order to access its 'options' attribute. Any error in that 'options' attribute raises DistutilsGetoptError; any error on the command-line raises DistutilsArgError. If no Distutils commands were found on the command line, raises DistutilsArgError. Return true if command-line successfully parsed and we should carry on with executing commands; false if no errors but we shouldn't execute commands (currently, this only happens if user asks for help).""" |
self.write(STOP) def dump_special(self, callable, args, state = None): if type(args) is not TupleType and args is not None: raise PicklingError, "Second argument to dump_special " \ "must be a tuple" self.save_reduce(callable, args, state) | | def dump(self, object): self.save(object) self.write(STOP) |
def save_float(self, object): self.write(FLOAT + `object` + '\n') | def save_float(self, object, pack=struct.pack): if self.bin: self.write(BINFLOAT + pack('>d', object)) else: self.write(FLOAT + `object` + '\n') | def save_float(self, object): self.write(FLOAT + `object` + '\n') |
return self.reader.next() | data = self.reader.next() data, bytesencoded = self.encode(data, self.errors) return data | def next(self): |
msg['Content-Transfer-Encoding'] = '8bit' | charset = msg.get_charset() output_cset = charset and charset.output_charset if output_cset and output_cset.lower().startswith('iso-2202-'): msg['Content-Transfer-Encoding'] = '7bit' else: msg['Content-Transfer-Encoding'] = '8bit' | def encode_7or8bit(msg): """Set the Content-Transfer-Encoding header to 7bit or 8bit.""" orig = msg.get_payload() if orig is None: # There's no payload. For backwards compatibility we use 7bit msg['Content-Transfer-Encoding'] = '7bit' return # We play a trick to make this go fast. If encoding to ASCII succeeds, we # know the data must be 7bit, otherwise treat it as 8bit. try: orig.encode('ascii') except UnicodeError: msg['Content-Transfer-Encoding'] = '8bit' else: msg['Content-Transfer-Encoding'] = '7bit' |
print '-%20.20s %20.20 %-30.30s'%(f, d[5:], s) | print '-%20.20s %20.20s %-30.30s'%(f, d[5:], s) | def _test(): import time import sys import string import os args = sys.argv[1:] if not args: for key in 'MAILDIR', 'MAIL', 'LOGNAME', 'USER': if os.environ.has_key(key): mbox = os.environ[key] break else: print "$MAIL, $LOGNAME nor $USER set -- who are you?" return else: mbox = args[0] if mbox[:1] == '+': mbox = os.environ['HOME'] + '/Mail/' + mbox[1:] elif not '/' in mbox: mbox = '/usr/mail/' + mbox if os.path.isdir(mbox): if os.path.isdir(os.path.join(mbox, 'cur')): mb = Maildir(mbox) else: mb = MHMailbox(mbox) else: fp = open(mbox, 'r') mb = UnixMailbox(fp) msgs = [] while 1: msg = mb.next() if msg is None: break msgs.append(msg) if len(args) <= 1: msg.fp = None if len(args) > 1: num = string.atoi(args[1]) print 'Message %d body:'%num msg = msgs[num-1] msg.rewindbody() sys.stdout.write(msg.fp.read()) else: print 'Mailbox',mbox,'has',len(msgs),'messages:' for msg in msgs: f = msg.getheader('from') or "" s = msg.getheader('subject') or "" d = msg.getheader('date') or "" print '-%20.20s %20.20 %-30.30s'%(f, d[5:], s) |
self.pimpinstaller = pimp.PimpInstaller(self.pimpdb) | | def setuppimp(self, url): self.pimpprefs = pimp.PimpPreferences() self.pimpdb = pimp.PimpDatabase(self.pimpprefs) self.pimpinstaller = pimp.PimpInstaller(self.pimpdb) if not url: url = self.pimpprefs.pimpDatabase try: self.pimpdb.appendURL(url) except IOError, arg: rv = "Cannot open %s: %s\n" % (url, arg) rv += "\nSee MacPython Package Manager help page." return rv except: rv = "Unspecified error while parsing database: %s\n" % url rv += "Usually, this means the database is not correctly formatted.\n" rv += "\nSee MacPython Package Manager help page." return rv # Check whether we can write the installation directory. # If not, set to the per-user directory, possibly # creating it, if needed. installDir = self.pimpprefs.installDir if not os.access(installDir, os.R_OK|os.W_OK|os.X_OK): rv = self.setuserinstall(1) if rv: return rv return self.pimpprefs.check() |
self.pimpinstaller = None | | def closepimp(self): self.pimpdb.close() self.pimpprefs = None self.pimpdb = None self.pimpinstaller = None self.packages = [] |
list, messages = self.pimpinstaller.prepareInstall(pkg, force, recursive) | pimpinstaller = pimp.PimpInstaller(self.pimpdb) list, messages = pimpinstaller.prepareInstall(pkg, force, recursive) | def installpackage(self, sel, output, recursive, force): pkg = self.packages[sel] list, messages = self.pimpinstaller.prepareInstall(pkg, force, recursive) if messages: return messages messages = self.pimpinstaller.install(list, output) return messages |
messages = self.pimpinstaller.install(list, output) | messages = pimpinstaller.install(list, output) | def installpackage(self, sel, output, recursive, force): pkg = self.packages[sel] list, messages = self.pimpinstaller.prepareInstall(pkg, force, recursive) if messages: return messages messages = self.pimpinstaller.install(list, output) return messages |
self.libs = None | self.libraries = None | self.undef = None |
if type (self.libs) is StringType: self.libs = [self.libs] | if type (self.libraries) is StringType: self.libraries = [self.libraries] | def finalize_options (self): from distutils import sysconfig |
if self.libs is not None: self.compiler.set_libraries (self.libs) | if self.libraries is not None: self.compiler.set_libraries (self.libraries) | if self.undef is not None: for macro in self.undef: self.compiler.undefine_macro (macro) |
| if self.distribution.libraries: build_clib = self.find_peer ('build_clib') self.libraries = build_clib.get_library_names () or [] self.library_dirs = [build_clib.build_clib] else: self.libraries = [] self.library_dirs = [] | if self.undef is not None: for macro in self.undef: self.compiler.undefine_macro (macro) |
libraries = (self.libraries + (build_info.get ('libraries') or [])) library_dirs = (self.library_dirs + (build_info.get ('library_dirs') or [])) | libraries = build_info.get ('libraries') library_dirs = build_info.get ('library_dirs') rpath = build_info.get ('rpath') | def build_extensions (self, extensions): |
"math.floor(huge)", "math.floor(mhuge)", "float(shuge) == int(shuge)"]: | "math.floor(huge)", "math.floor(mhuge)"]: | def test_float_overflow(): import math if verbose: print "long->float overflow" for x in -2.0, -1.0, 0.0, 1.0, 2.0: verify(float(long(x)) == x) shuge = '12345' * 120 huge = 1L << 30000 mhuge = -huge namespace = {'huge': huge, 'mhuge': mhuge, 'shuge': shuge, 'math': math} for test in ["float(huge)", "float(mhuge)", "complex(huge)", "complex(mhuge)", "complex(huge, 1)", "complex(mhuge, 1)", "complex(1, huge)", "complex(1, mhuge)", "1. + huge", "huge + 1.", "1. + mhuge", "mhuge + 1.", "1. - huge", "huge - 1.", "1. - mhuge", "mhuge - 1.", "1. * huge", "huge * 1.", "1. * mhuge", "mhuge * 1.", "1. // huge", "huge // 1.", "1. // mhuge", "mhuge // 1.", "1. / huge", "huge / 1.", "1. / mhuge", "mhuge / 1.", "1. ** huge", "huge ** 1.", "1. ** mhuge", "mhuge ** 1.", "math.sin(huge)", "math.sin(mhuge)", "math.sqrt(huge)", "math.sqrt(mhuge)", # should do better "math.floor(huge)", "math.floor(mhuge)", "float(shuge) == int(shuge)"]: try: eval(test, namespace) except OverflowError: pass else: raise TestFailed("expected OverflowError from %s" % test) |
print __doc__ % globals() | def usage(status, msg=''): if msg: print msg print __doc__ % globals() sys.exit(status) |
break | if colordb: break | def main(): try: opts, args = getopt.getopt( sys.argv[1:], 'hd:', ['database=', 'help']) except getopt.error, msg: usage(1, msg) if len(args) == 0: initialcolor = 'grey50' elif len(args) == 1: initialcolor = args[0] else: usage(1) for opt, arg in opts: if opt in ('-h', '--help'): usage(0) elif opt in ('-d', '--database'): RGB_TXT.insert(0, arg) # create the windows and go for f in RGB_TXT: try: colordb = ColorDB.get_colordb(f) break except IOError: pass else: raise IOError('No color database file found') # get triplet for initial color try: red, green, blue = colordb.find_byname(initialcolor) except ColorDB.BadColor: # must be a #rrggbb style color try: red, green, blue = ColorDB.rrggbb_to_triplet(initialcolor) except ColorDB.BadColor: print 'Bad initial color, using default: %s' % initialcolor initialcolor = 'grey50' try: red, green, blue = ColorDB.rrggbb_to_triplet(initialcolor) except ColorDB.BadColor: usage(1, 'Cannot find an initial color to use') # create all output widgets s = Switchboard(colordb) # create the application window decorations app = PyncheWidget(__version__, s) parent = app.parent() s.add_view(StripViewer(s, parent)) s.add_view(ChipViewer(s, parent)) s.add_view(TypeinViewer(s, parent)) s.update_views(red, green, blue) try: app.start() except KeyboardInterrupt: pass |
raise IOError('No color database file found') | usage(1, 'No color database file found, see the -d option.') | def main(): try: opts, args = getopt.getopt( sys.argv[1:], 'hd:', ['database=', 'help']) except getopt.error, msg: usage(1, msg) if len(args) == 0: initialcolor = 'grey50' elif len(args) == 1: initialcolor = args[0] else: usage(1) for opt, arg in opts: if opt in ('-h', '--help'): usage(0) elif opt in ('-d', '--database'): RGB_TXT.insert(0, arg) # create the windows and go for f in RGB_TXT: try: colordb = ColorDB.get_colordb(f) break except IOError: pass else: raise IOError('No color database file found') # get triplet for initial color try: red, green, blue = colordb.find_byname(initialcolor) except ColorDB.BadColor: # must be a #rrggbb style color try: red, green, blue = ColorDB.rrggbb_to_triplet(initialcolor) except ColorDB.BadColor: print 'Bad initial color, using default: %s' % initialcolor initialcolor = 'grey50' try: red, green, blue = ColorDB.rrggbb_to_triplet(initialcolor) except ColorDB.BadColor: usage(1, 'Cannot find an initial color to use') # create all output widgets s = Switchboard(colordb) # create the application window decorations app = PyncheWidget(__version__, s) parent = app.parent() s.add_view(StripViewer(s, parent)) s.add_view(ChipViewer(s, parent)) s.add_view(TypeinViewer(s, parent)) s.update_views(red, green, blue) try: app.start() except KeyboardInterrupt: pass |
| if msilib.msi_type=="Intel64;1033": sqlite_arch = "/ia64" elif msilib.msi_type=="x64;1033": sqlite_arch = "/amd64" else: sqlite_arch = "" lib.add_file(srcdir+"/"+sqlite_dir+sqlite_arch+"/sqlite3.dll") | def add_files(db): cab = CAB("python") tmpfiles = [] # Add all executables, icons, text files into the TARGETDIR component root = PyDirectory(db, cab, None, srcdir, "TARGETDIR", "SourceDir") default_feature.set_current() if not msilib.Win64: root.add_file("PCBuild/w9xpopen.exe") root.add_file("README.txt", src="README") root.add_file("NEWS.txt", src="Misc/NEWS") root.add_file("LICENSE.txt", src="LICENSE") root.start_component("python.exe", keyfile="python.exe") root.add_file("PCBuild/python.exe") root.start_component("pythonw.exe", keyfile="pythonw.exe") root.add_file("PCBuild/pythonw.exe") # msidbComponentAttributesSharedDllRefCount = 8, see "Component Table" dlldir = PyDirectory(db, cab, root, srcdir, "DLLDIR", ".") pydll = "python%s%s.dll" % (major, minor) pydllsrc = srcdir + "/PCBuild/" + pydll dlldir.start_component("DLLDIR", flags = 8, keyfile = pydll, uuid = pythondll_uuid) installer = msilib.MakeInstaller() pyversion = installer.FileVersion(pydllsrc, 0) if not snapshot: # For releases, the Python DLL has the same version as the # installer package. assert pyversion.split(".")[:3] == current_version.split(".") dlldir.add_file("PCBuild/python%s%s.dll" % (major, minor), version=pyversion, language=installer.FileVersion(pydllsrc, 1)) # XXX determine dependencies version, lang = extract_msvcr71() dlldir.start_component("msvcr71", flags=8, keyfile="msvcr71.dll", uuid=msvcr71_uuid) dlldir.add_file("msvcr71.dll", src=os.path.abspath("msvcr71.dll"), version=version, language=lang) tmpfiles.append("msvcr71.dll") # Add all .py files in Lib, except lib-tk, test dirs={} pydirs = [(root,"Lib")] while pydirs: parent, dir = pydirs.pop() if dir == ".svn" or dir.startswith("plat-"): continue elif dir in ["lib-tk", "idlelib", "Icons"]: if not have_tcl: continue tcltk.set_current() elif dir in ['test', 'tests', 'data', 'output']: # test: Lib, Lib/email, Lib/bsddb, Lib/ctypes, Lib/sqlite3 # tests: Lib/distutils # data: Lib/email/test # output: Lib/test testsuite.set_current() else: default_feature.set_current() lib = PyDirectory(db, cab, parent, dir, dir, "%s|%s" % (parent.make_short(dir), dir)) # Add additional files dirs[dir]=lib lib.glob("*.txt") if dir=='site-packages': lib.add_file("README.txt", src="README") continue files = lib.glob("*.py") files += lib.glob("*.pyw") if files: # Add an entry to the RemoveFile table to remove bytecode files. lib.remove_pyc() if dir.endswith('.egg-info'): lib.add_file('entry_points.txt') lib.add_file('PKG-INFO') lib.add_file('top_level.txt') lib.add_file('zip-safe') continue if dir=='test' and parent.physical=='Lib': lib.add_file("185test.db") lib.add_file("audiotest.au") lib.add_file("cfgparser.1") lib.add_file("test.xml") lib.add_file("test.xml.out") lib.add_file("testtar.tar") lib.add_file("test_difflib_expect.html") lib.add_file("check_soundcard.vbs") lib.add_file("empty.vbs") lib.glob("*.uue") lib.add_file("readme.txt", src="README") if dir=='decimaltestdata': lib.glob("*.decTest") if dir=='output': lib.glob("test_*") if dir=='idlelib': lib.glob("*.def") lib.add_file("idle.bat") if dir=="Icons": lib.glob("*.gif") lib.add_file("idle.icns") if dir=="command" and parent.physical=="distutils": lib.add_file("wininst-6.exe") lib.add_file("wininst-7.1.exe") if dir=="setuptools": lib.add_file("cli.exe") lib.add_file("gui.exe") if dir=="data" and parent.physical=="test" and parent.basedir.physical=="email": # This should contain all non-.svn files listed in subversion for f in os.listdir(lib.absolute): if f.endswith(".txt") or f==".svn":continue if f.endswith(".au") or f.endswith(".gif"): lib.add_file(f) else: print "WARNING: New file %s in email/test/data" % f for f in os.listdir(lib.absolute): if os.path.isdir(os.path.join(lib.absolute, f)): pydirs.append((lib, f)) # Add DLLs default_feature.set_current() lib = PyDirectory(db, cab, root, srcdir+"/PCBuild", "DLLs", "DLLS|DLLs") lib.add_file("py.ico", src="../PC/py.ico") lib.add_file("pyc.ico", src="../PC/pyc.ico") dlls = [] tclfiles = [] for f in extensions: if f=="_tkinter.pyd": continue if not os.path.exists(srcdir+"/PCBuild/"+f): print "WARNING: Missing extension", f continue dlls.append(f) lib.add_file(f) if have_tcl: if not os.path.exists(srcdir+"/PCBuild/_tkinter.pyd"): print "WARNING: Missing _tkinter.pyd" else: lib.start_component("TkDLLs", tcltk) lib.add_file("_tkinter.pyd") dlls.append("_tkinter.pyd") tcldir = os.path.normpath(srcdir+"/../tcltk/bin") for f in glob.glob1(tcldir, "*.dll"): lib.add_file(f, src=os.path.join(tcldir, f)) # Add sqlite if msilib.msi_type=="Intel64;1033": sqlite_arch = "/ia64" elif msilib.msi_type=="x64;1033": sqlite_arch = "/amd64" else: sqlite_arch = "" lib.add_file(srcdir+"/"+sqlite_dir+sqlite_arch+"/sqlite3.dll") # check whether there are any unknown extensions for f in glob.glob1(srcdir+"/PCBuild", "*.pyd"): if f.endswith("_d.pyd"): continue # debug version if f in dlls: continue print "WARNING: Unknown extension", f # Add headers default_feature.set_current() lib = PyDirectory(db, cab, root, "include", "include", "INCLUDE|include") lib.glob("*.h") lib.add_file("pyconfig.h", src="../PC/pyconfig.h") # Add import libraries lib = PyDirectory(db, cab, root, "PCBuild", "libs", "LIBS|libs") for f in dlls: lib.add_file(f.replace('pyd','lib')) lib.add_file('python%s%s.lib' % (major, minor)) # Add the mingw-format library if have_mingw: lib.add_file('libpython%s%s.a' % (major, minor)) if have_tcl: # Add Tcl/Tk tcldirs = [(root, '../tcltk/lib', 'tcl')] tcltk.set_current() while tcldirs: parent, phys, dir = tcldirs.pop() lib = PyDirectory(db, cab, parent, phys, dir, "%s|%s" % (parent.make_short(dir), dir)) if not os.path.exists(lib.absolute): continue for f in os.listdir(lib.absolute): if os.path.isdir(os.path.join(lib.absolute, f)): tcldirs.append((lib, f, f)) else: lib.add_file(f) # Add tools tools.set_current() tooldir = PyDirectory(db, cab, root, "Tools", "Tools", "TOOLS|Tools") for f in ['i18n', 'pynche', 'Scripts', 'versioncheck', 'webchecker']: lib = PyDirectory(db, cab, tooldir, f, f, "%s|%s" % (tooldir.make_short(f), f)) lib.glob("*.py") lib.glob("*.pyw", exclude=['pydocgui.pyw']) lib.remove_pyc() lib.glob("*.txt") if f == "pynche": x = PyDirectory(db, cab, lib, "X", "X", "X|X") x.glob("*.txt") if os.path.exists(os.path.join(lib.absolute, "README")): lib.add_file("README.txt", src="README") if f == 'Scripts': if have_tcl: lib.start_component("pydocgui.pyw", tcltk, keyfile="pydocgui.pyw") lib.add_file("pydocgui.pyw") # Add documentation htmlfiles.set_current() lib = PyDirectory(db, cab, root, "Doc", "Doc", "DOC|Doc") lib.start_component("documentation", keyfile="Python%s%s.chm" % (major,minor)) lib.add_file("Python%s%s.chm" % (major, minor)) cab.commit(db) for f in tmpfiles: os.unlink(f) |
pardir_fsr = Carbon.File.FSRef(fss) | pardir_fsr = Carbon.File.FSRef(pardir_fss) | def AskFileForSave(**args): default_flags = 0x07 args, tpwanted = _process_Nav_args(args, _ALLOWED_KEYS, default_flags) try: rr = Nav.NavPutFile(args) good = 1 except Nav.error, arg: if arg[0] != -128: # userCancelledErr raise Nav.error, arg return None if not rr.validRecord or not rr.selection: return None if issubclass(tpwanted, Carbon.File.FSRef): raise TypeError, "Cannot pass wanted=FSRef to AskFileForSave" if issubclass(tpwanted, Carbon.File.FSSpec): return tpwanted(rr.selection[0]) if issubclass(tpwanted, (str, unicode)): # This is gross, and probably incorrect too vrefnum, dirid, name = rr.selection[0].as_tuple() pardir_fss = Carbon.File.FSSpec((vrefnum, dirid, '')) pardir_fsr = Carbon.File.FSRef(fss) pardir_path = pardir_fsr.FSRefMakePath() # This is utf-8 name_utf8 = unicode(name, 'macroman').encode('utf8') fullpath = os.path.join(pardir_path, name_utf8) if issubclass(tpwanted, unicode): return unicode(fullpath, 'utf8') return tpwanted(fullpath) raise TypeError, "Unknown value for argument 'wanted': %s" % repr(tpwanted) |
return sys.modules[fqname] | module = sys.modules[fqname] module.__name__ = fqname return module | def _process_result(self, (ispkg, code, values), fqname): # did get_code() return an actual module? (rather than a code object) is_module = isinstance(code, _ModuleType) |
result = interact(handler.load(), 'System-wide preferences') | options = handler.load() if options['noargs']: EasyDialogs.Message('Warning: system-wide sys.argv processing is off.\nIf you dropped an applet I have not seen it.') result = interact(options, 'System-wide preferences') | def edit_preferences(): handler = pythonprefs.PythonOptions() result = interact(handler.load(), 'System-wide preferences') if result: handler.save(result) |
import pprint pprint.pprint(self.__dict__) | pass | def report(self): # XXX something decent import pprint pprint.pprint(self.__dict__) |
SITE_CO = compile(SITE_PY, "<-bundlebuilder->", "exec") | SITE_CO = compile(SITE_PY, "<-bundlebuilder.py->", "exec") | def report(self): # XXX something decent import pprint pprint.pprint(self.__dict__) |
EXECVE_WRAPPER = """\ import os from sys import argv, executable resources = os.path.join(os.path.dirname(os.path.dirname(argv[0])), "Resources") mainprogram = os.path.join(resources, "%(mainprogram)s") assert os.path.exists(mainprogram) argv.insert(1, mainprogram) os.environ["PYTHONPATH"] = resources %(setexecutable)s os.execve(executable, argv, os.environ) | BOOTSTRAP_SCRIPT = """\ execdir=$(dirname ${0}) executable=${execdir}/%(executable)s resdir=$(dirname ${execdir})/Resources main=${resdir}/%(mainprogram)s PYTHONPATH=$resdir export PYTHONPATH exec ${executable} ${main} ${1} | def report(self): # XXX something decent import pprint pprint.pprint(self.__dict__) |
setExecutableTemplate = """executable = os.path.join(resources, "%s")""" pythonhomeSnippet = """os.environ["home"] = resources""" | | def report(self): # XXX something decent import pprint pprint.pprint(self.__dict__) |
| if self.executable is None: self.executable = sys.executable | def setup(self): if self.standalone and self.mainprogram is None: raise BundleBuilderError, ("must specify 'mainprogram' when " "building a standalone application.") if self.mainprogram is None and self.executable is None: raise BundleBuilderError, ("must specify either or both of " "'executable' and 'mainprogram'") |
execpath = pathjoin(self.execdir, self.name) | execname = self.name | def preProcess(self): resdir = "Contents/Resources" if self.executable is not None: if self.mainprogram is None: execpath = pathjoin(self.execdir, self.name) else: execpath = pathjoin(resdir, os.path.basename(self.executable)) if not self.symlink_exec: self.files.append((self.executable, execpath)) self.binaries.append(execpath) self.execpath = execpath # For execve wrapper setexecutable = setExecutableTemplate % os.path.basename(self.executable) else: setexecutable = "" # XXX for locals() call |
execpath = pathjoin(resdir, os.path.basename(self.executable)) | execname = os.path.basename(self.executable) execpath = pathjoin(self.execdir, execname) | def preProcess(self): resdir = "Contents/Resources" if self.executable is not None: if self.mainprogram is None: execpath = pathjoin(self.execdir, self.name) else: execpath = pathjoin(resdir, os.path.basename(self.executable)) if not self.symlink_exec: self.files.append((self.executable, execpath)) self.binaries.append(execpath) self.execpath = execpath # For execve wrapper setexecutable = setExecutableTemplate % os.path.basename(self.executable) else: setexecutable = "" # XXX for locals() call |
setexecutable = setExecutableTemplate % os.path.basename(self.executable) else: setexecutable = "" | | def preProcess(self): resdir = "Contents/Resources" if self.executable is not None: if self.mainprogram is None: execpath = pathjoin(self.execdir, self.name) else: execpath = pathjoin(resdir, os.path.basename(self.executable)) if not self.symlink_exec: self.files.append((self.executable, execpath)) self.binaries.append(execpath) self.execpath = execpath # For execve wrapper setexecutable = setExecutableTemplate % os.path.basename(self.executable) else: setexecutable = "" # XXX for locals() call |
open(mainwrapperpath, "w").write(EXECVE_WRAPPER % locals()) | open(mainwrapperpath, "w").write(BOOTSTRAP_SCRIPT % locals()) | def preProcess(self): resdir = "Contents/Resources" if self.executable is not None: if self.mainprogram is None: execpath = pathjoin(self.execdir, self.name) else: execpath = pathjoin(resdir, os.path.basename(self.executable)) if not self.symlink_exec: self.files.append((self.executable, execpath)) self.binaries.append(execpath) self.execpath = execpath # For execve wrapper setexecutable = setExecutableTemplate % os.path.basename(self.executable) else: setexecutable = "" # XXX for locals() call |
if self.missingModules: | if self.missingModules or self.maybeMissingModules: | def postProcess(self): self.addPythonModules() if self.strip and not self.symlink: self.stripBinaries() |
self.missingModules.extend(mf.any_missing()) | if hasattr(mf, "any_missing_maybe"): missing, maybe = mf.any_missing_maybe() else: missing = mf.any_missing() maybe = [] self.missingModules.extend(missing) self.maybeMissingModules.extend(maybe) | def findDependencies(self): self.message("Finding module dependencies", 1) import modulefinder mf = modulefinder.ModuleFinder(excludes=self.excludeModules) # manually add our own site.py site = mf.add_module("site") site.__code__ = SITE_CO mf.scan_code(SITE_CO, site) |
missingsub = [name for name in missing if "." in name] missing = [name for name in missing if "." not in name] | if self.maybeMissingModules: maybe = self.maybeMissingModules else: maybe = [name for name in missing if "." in name] missing = [name for name in missing if "." not in name] | def reportMissing(self): missing = [name for name in self.missingModules if name not in MAYMISS_MODULES] missingsub = [name for name in missing if "." in name] missing = [name for name in missing if "." not in name] missing.sort() missingsub.sort() if missing: self.message("Warning: couldn't find the following modules:", 1) self.message(" " + ", ".join(missing)) if missingsub: self.message("Warning: couldn't find the following submodules " "(but it's probably OK since modulefinder can't distinguish " "between from \"module import submodule\" and " "\"from module import name\"):", 1) self.message(" " + ", ".join(missingsub)) |
missingsub.sort() | maybe.sort() if maybe: self.message("Warning: couldn't find the following submodules:", 1) self.message(" (Note that these could be false alarms -- " "it's not always", 1) self.message(" possible to distinguish between from \"package import submodule\" ", 1) self.message(" and \"from package import name\")", 1) for name in maybe: self.message(" ? " + name, 1) | def reportMissing(self): missing = [name for name in self.missingModules if name not in MAYMISS_MODULES] missingsub = [name for name in missing if "." in name] missing = [name for name in missing if "." not in name] missing.sort() missingsub.sort() if missing: self.message("Warning: couldn't find the following modules:", 1) self.message(" " + ", ".join(missing)) if missingsub: self.message("Warning: couldn't find the following submodules " "(but it's probably OK since modulefinder can't distinguish " "between from \"module import submodule\" and " "\"from module import name\"):", 1) self.message(" " + ", ".join(missingsub)) |
self.message(" " + ", ".join(missing)) if missingsub: self.message("Warning: couldn't find the following submodules " "(but it's probably OK since modulefinder can't distinguish " "between from \"module import submodule\" and " "\"from module import name\"):", 1) self.message(" " + ", ".join(missingsub)) | for name in missing: self.message(" ? " + name, 1) def report(self): import pprint pprint.pprint(self.__dict__) if self.standalone: self.reportMissing() | def reportMissing(self): missing = [name for name in self.missingModules if name not in MAYMISS_MODULES] missingsub = [name for name in missing if "." in name] missing = [name for name in missing if "." not in name] missing.sort() missingsub.sort() if missing: self.message("Warning: couldn't find the following modules:", 1) self.message(" " + ", ".join(missing)) if missingsub: self.message("Warning: couldn't find the following submodules " "(but it's probably OK since modulefinder can't distinguish " "between from \"module import submodule\" and " "\"from module import name\"):", 1) self.message(" " + ", ".join(missingsub)) |
return self._qsize - self.getfilled() | return (self._qsize / self._nchannels / self._sampwidth) - self.getfilled() | def getfillable(self): return self._qsize - self.getfilled() |
if op in (LITERAL, NOT_LITERAL): | if op in LITERAL_CODES: | def _compile(code, pattern, flags): # internal: compile a (sub)pattern emit = code.append for op, av in pattern: if op in (LITERAL, NOT_LITERAL): if flags & SRE_FLAG_IGNORECASE: emit(OPCODES[OP_IGNORE[op]]) emit(_sre.getlower(av, flags)) else: emit(OPCODES[op]) emit(av) elif op is IN: if flags & SRE_FLAG_IGNORECASE: emit(OPCODES[OP_IGNORE[op]]) def fixup(literal, flags=flags): return _sre.getlower(literal, flags) else: emit(OPCODES[op]) fixup = lambda x: x skip = len(code); emit(0) _compile_charset(av, flags, code, fixup) code[skip] = len(code) - skip elif op is ANY: if flags & SRE_FLAG_DOTALL: emit(OPCODES[ANY_ALL]) else: emit(OPCODES[ANY]) elif op in (REPEAT, MIN_REPEAT, MAX_REPEAT): if flags & SRE_FLAG_TEMPLATE: raise error, "internal: unsupported template operator" emit(OPCODES[REPEAT]) skip = len(code); emit(0) emit(av[0]) emit(av[1]) _compile(code, av[2], flags) emit(OPCODES[SUCCESS]) code[skip] = len(code) - skip elif _simple(av) and op != REPEAT: if op == MAX_REPEAT: emit(OPCODES[REPEAT_ONE]) else: emit(OPCODES[MIN_REPEAT_ONE]) skip = len(code); emit(0) emit(av[0]) emit(av[1]) _compile(code, av[2], flags) emit(OPCODES[SUCCESS]) code[skip] = len(code) - skip else: emit(OPCODES[REPEAT]) skip = len(code); emit(0) emit(av[0]) emit(av[1]) _compile(code, av[2], flags) code[skip] = len(code) - skip if op == MAX_REPEAT: emit(OPCODES[MAX_UNTIL]) else: emit(OPCODES[MIN_UNTIL]) elif op is SUBPATTERN: if av[0]: emit(OPCODES[MARK]) emit((av[0]-1)*2) # _compile_info(code, av[1], flags) _compile(code, av[1], flags) if av[0]: emit(OPCODES[MARK]) emit((av[0]-1)*2+1) elif op in (SUCCESS, FAILURE): emit(OPCODES[op]) elif op in (ASSERT, ASSERT_NOT): emit(OPCODES[op]) skip = len(code); emit(0) if av[0] >= 0: emit(0) # look ahead else: lo, hi = av[1].getwidth() if lo != hi: raise error, "look-behind requires fixed-width pattern" emit(lo) # look behind _compile(code, av[1], flags) emit(OPCODES[SUCCESS]) code[skip] = len(code) - skip elif op is CALL: emit(OPCODES[op]) skip = len(code); emit(0) _compile(code, av, flags) emit(OPCODES[SUCCESS]) code[skip] = len(code) - skip elif op is AT: emit(OPCODES[op]) if flags & SRE_FLAG_MULTILINE: av = AT_MULTILINE.get(av, av) if flags & SRE_FLAG_LOCALE: av = AT_LOCALE.get(av, av) elif flags & SRE_FLAG_UNICODE: av = AT_UNICODE.get(av, av) emit(ATCODES[av]) elif op is BRANCH: emit(OPCODES[op]) tail = [] for av in av[1]: skip = len(code); emit(0) # _compile_info(code, av, flags) _compile(code, av, flags) emit(OPCODES[JUMP]) tail.append(len(code)); emit(0) code[skip] = len(code) - skip emit(0) # end of branch for tail in tail: code[tail] = len(code) - tail elif op is CATEGORY: emit(OPCODES[op]) if flags & SRE_FLAG_LOCALE: av = CH_LOCALE[av] elif flags & SRE_FLAG_UNICODE: av = CH_UNICODE[av] emit(CHCODES[av]) elif op is GROUPREF: if flags & SRE_FLAG_IGNORECASE: emit(OPCODES[OP_IGNORE[op]]) else: emit(OPCODES[op]) emit(av-1) elif op is GROUPREF_EXISTS: emit(OPCODES[op]) emit((av[0]-1)*2) skipyes = len(code); emit(0) _compile(code, av[1], flags) if av[2]: emit(OPCODES[JUMP]) skipno = len(code); emit(0) code[skipyes] = len(code) - skipyes + 1 _compile(code, av[2], flags) code[skipno] = len(code) - skipno else: code[skipyes] = len(code) - skipyes + 1 else: raise ValueError, ("unsupported operand type", op) |
fixup = lambda x: x skip = len(code); emit(0) | fixup = _identityfunction skip = _len(code); emit(0) | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
code[skip] = len(code) - skip | code[skip] = _len(code) - skip | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
elif op in (REPEAT, MIN_REPEAT, MAX_REPEAT): | elif op in REPEATING_CODES: | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
skip = len(code); emit(0) | skip = _len(code); emit(0) | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
code[skip] = len(code) - skip elif _simple(av) and op != REPEAT: if op == MAX_REPEAT: | code[skip] = _len(code) - skip elif _simple(av) and op is not REPEAT: if op is MAX_REPEAT: | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
code[skip] = len(code) - skip if op == MAX_REPEAT: | code[skip] = _len(code) - skip if op is MAX_REPEAT: | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
elif op in (SUCCESS, FAILURE): emit(OPCODES[op]) elif op in (ASSERT, ASSERT_NOT): emit(OPCODES[op]) skip = len(code); emit(0) | elif op in SUCCESS_CODES: emit(OPCODES[op]) elif op in ASSERT_CODES: emit(OPCODES[op]) skip = _len(code); emit(0) | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
tail.append(len(code)); emit(0) code[skip] = len(code) - skip | tailappend(_len(code)); emit(0) code[skip] = _len(code) - skip | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
code[tail] = len(code) - tail | code[tail] = _len(code) - tail | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
skipyes = len(code); emit(0) | skipyes = _len(code); emit(0) | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
skipno = len(code); emit(0) code[skipyes] = len(code) - skipyes + 1 | skipno = _len(code); emit(0) code[skipyes] = _len(code) - skipyes + 1 | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
code[skipno] = len(code) - skipno else: code[skipyes] = len(code) - skipyes + 1 | code[skipno] = _len(code) - skipno else: code[skipyes] = _len(code) - skipyes + 1 | def fixup(literal, flags=flags): return _sre.getlower(literal, flags) |
fixup = lambda x: x | fixup = _identityfunction | def _compile_charset(charset, flags, code, fixup=None): # compile charset subprogram emit = code.append if fixup is None: fixup = lambda x: x for op, av in _optimize_charset(charset, fixup): emit(OPCODES[op]) if op is NEGATE: pass elif op is LITERAL: emit(fixup(av)) elif op is RANGE: emit(fixup(av[0])) emit(fixup(av[1])) elif op is CHARSET: code.extend(av) elif op is BIGCHARSET: code.extend(av) elif op is CATEGORY: if flags & SRE_FLAG_LOCALE: emit(CHCODES[CH_LOCALE[av]]) elif flags & SRE_FLAG_UNICODE: emit(CHCODES[CH_UNICODE[av]]) else: emit(CHCODES[av]) else: raise error, "internal: unsupported set operator" emit(OPCODES[FAILURE]) |
out.append((op, av)) | outappend((op, av)) | def _optimize_charset(charset, fixup): # internal: optimize character set out = [] charmap = [False]*256 try: for op, av in charset: if op is NEGATE: out.append((op, av)) elif op is LITERAL: charmap[fixup(av)] = True elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = True elif op is CATEGORY: # XXX: could append to charmap tail return charset # cannot compress except IndexError: # character set contains unicode characters return _optimize_unicode(charset, fixup) # compress character map i = p = n = 0 runs = [] for c in charmap: if c: if n == 0: p = i n = n + 1 elif n: runs.append((p, n)) n = 0 i = i + 1 if n: runs.append((p, n)) if len(runs) <= 2: # use literal/range for p, n in runs: if n == 1: out.append((LITERAL, p)) else: out.append((RANGE, (p, p+n-1))) if len(out) < len(charset): return out else: # use bitmap data = _mk_bitmap(charmap) out.append((CHARSET, data)) return out return charset |
runs.append((p, n)) | runsappend((p, n)) | def _optimize_charset(charset, fixup): # internal: optimize character set out = [] charmap = [False]*256 try: for op, av in charset: if op is NEGATE: out.append((op, av)) elif op is LITERAL: charmap[fixup(av)] = True elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = True elif op is CATEGORY: # XXX: could append to charmap tail return charset # cannot compress except IndexError: # character set contains unicode characters return _optimize_unicode(charset, fixup) # compress character map i = p = n = 0 runs = [] for c in charmap: if c: if n == 0: p = i n = n + 1 elif n: runs.append((p, n)) n = 0 i = i + 1 if n: runs.append((p, n)) if len(runs) <= 2: # use literal/range for p, n in runs: if n == 1: out.append((LITERAL, p)) else: out.append((RANGE, (p, p+n-1))) if len(out) < len(charset): return out else: # use bitmap data = _mk_bitmap(charmap) out.append((CHARSET, data)) return out return charset |
out.append((LITERAL, p)) else: out.append((RANGE, (p, p+n-1))) | outappend((LITERAL, p)) else: outappend((RANGE, (p, p+n-1))) | def _optimize_charset(charset, fixup): # internal: optimize character set out = [] charmap = [False]*256 try: for op, av in charset: if op is NEGATE: out.append((op, av)) elif op is LITERAL: charmap[fixup(av)] = True elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = True elif op is CATEGORY: # XXX: could append to charmap tail return charset # cannot compress except IndexError: # character set contains unicode characters return _optimize_unicode(charset, fixup) # compress character map i = p = n = 0 runs = [] for c in charmap: if c: if n == 0: p = i n = n + 1 elif n: runs.append((p, n)) n = 0 i = i + 1 if n: runs.append((p, n)) if len(runs) <= 2: # use literal/range for p, n in runs: if n == 1: out.append((LITERAL, p)) else: out.append((RANGE, (p, p+n-1))) if len(out) < len(charset): return out else: # use bitmap data = _mk_bitmap(charmap) out.append((CHARSET, data)) return out return charset |
out.append((CHARSET, data)) | outappend((CHARSET, data)) | def _optimize_charset(charset, fixup): # internal: optimize character set out = [] charmap = [False]*256 try: for op, av in charset: if op is NEGATE: out.append((op, av)) elif op is LITERAL: charmap[fixup(av)] = True elif op is RANGE: for i in range(fixup(av[0]), fixup(av[1])+1): charmap[i] = True elif op is CATEGORY: # XXX: could append to charmap tail return charset # cannot compress except IndexError: # character set contains unicode characters return _optimize_unicode(charset, fixup) # compress character map i = p = n = 0 runs = [] for c in charmap: if c: if n == 0: p = i n = n + 1 elif n: runs.append((p, n)) n = 0 i = i + 1 if n: runs.append((p, n)) if len(runs) <= 2: # use literal/range for p, n in runs: if n == 1: out.append((LITERAL, p)) else: out.append((RANGE, (p, p+n-1))) if len(out) < len(charset): return out else: # use bitmap data = _mk_bitmap(charmap) out.append((CHARSET, data)) return out return charset |
m = m << 1 | m = m + m | def _mk_bitmap(bits): data = [] if _sre.CODESIZE == 2: start = (1, 0) else: start = (1L, 0L) m, v = start for c in bits: if c: v = v + m m = m << 1 if m > MAXCODE: data.append(v) m, v = start return data |