rem (stringlengths 0–322k) | add (stringlengths 0–2.05M) | context (stringlengths 8–228k) |
---|---|---|
waste = self.rfile._sock.recv(1) | if not self.rfile._sock.recv(1): break | def run_cgi(self): """Execute a CGI script.""" dir, rest = self.cgi_info i = rest.rfind('?') if i >= 0: rest, query = rest[:i], rest[i+1:] else: query = '' i = rest.find('/') if i >= 0: script, rest = rest[:i], rest[i:] else: script, rest = rest, '' scriptname = dir + '/' + script scriptfile = self.translate_path(scriptname) if not os.path.exists(scriptfile): self.send_error(404, "No such CGI script (%s)" % `scriptname`) return if not os.path.isfile(scriptfile): self.send_error(403, "CGI script is not a plain file (%s)" % `scriptname`) return ispy = self.is_python(scriptname) if not ispy: if not (self.have_fork or self.have_popen2 or self.have_popen3): self.send_error(403, "CGI script is not a Python script (%s)" % `scriptname`) return if not self.is_executable(scriptfile): self.send_error(403, "CGI script is not executable (%s)" % `scriptname`) return |
if userhome.endswith('/'): i += 1 | userhome = userhome.rstrip('/') | def expanduser(path): """Expand ~ and ~user constructions. If user or $HOME is unknown, do nothing.""" if not path.startswith('~'): return path i = path.find('/', 1) if i < 0: i = len(path) if i == 1: if 'HOME' not in os.environ: import pwd userhome = pwd.getpwuid(os.getuid()).pw_dir else: userhome = os.environ['HOME'] else: import pwd try: pwent = pwd.getpwnam(path[1:i]) except KeyError: return path userhome = pwent.pw_dir if userhome.endswith('/'): i += 1 return userhome + path[i:] |
eq(folders, map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3', 'inbox', 'wide'])) | tfolders = map(normF, ['deep', 'deep/f1', 'deep/f2', 'deep/f2/f3', 'inbox', 'wide']) tfolders.sort() eq(folders, tfolders) | def test_listfolders(self): mh = getMH() eq = self.assertEquals |
with private names and those defined in other modules. + C.__doc__ for all classes C in M.__dict__.values(), except those with private names and those defined in other modules. | defined in other modules. + C.__doc__ for all classes C in M.__dict__.values(), except those defined in other modules. | def _test(): import doctest, M # replace M with your module's name return doctest.testmod(M) # ditto |
their contained methods and nested classes. Private names reached from M's globals are skipped, but all names reached from M.__test__ are searched. By default, a name is considered to be private if it begins with an underscore (like "_my_func") but doesn't both begin and end with (at least) two underscores (like "__init__"). You can change the default by passing your own "isprivate" function to testmod. | their contained methods and nested classes. All names reached from M.__test__ are searched. Optionally, functions with private names can be skipped (unless listed in M.__test__) by supplying a function to the "isprivate" argument that will identify private functions. For convenience, one such function is supplied. docttest.is_private considers a name to be private if it begins with an underscore (like "_my_func") but doesn't both begin and end with (at least) two underscores (like "__init__"). By supplying this function or your own "isprivate" function to testmod, the behavior can be customized. | def _test(): import doctest, M # replace M with your module's name return doctest.testmod(M) # ditto |
whether a name is private. The default function is doctest.is_private; see its docs for details. | whether a name is private. The default function is to assume that no functions are private. The "isprivate" arg may be set to doctest.is_private in order to skip over functions marked as private using an underscore naming convention; see its docs for details. | def __init__(self, mod=None, globs=None, verbose=None, isprivate=None, optionflags=0): """mod=None, globs=None, verbose=None, isprivate=None, |
isprivate = is_private | isprivate = lambda prefix, base: 0 | def __init__(self, mod=None, globs=None, verbose=None, isprivate=None, optionflags=0): """mod=None, globs=None, verbose=None, isprivate=None, |
>>> t = Tester(globs={}, verbose=0) | >>> t = Tester(globs={}, verbose=0, isprivate=is_private) | ... def bar(self): |
Again, but with a custom isprivate function allowing _f: >>> t = Tester(globs={}, verbose=0, isprivate=lambda x,y: 0) | Again, but with the default isprivate function allowing _f: >>> t = Tester(globs={}, verbose=0) | ... def bar(self): |
>>> t = Tester(globs={}, verbose=0, isprivate=lambda x,y: 0) | >>> t = Tester(globs={}, verbose=0) | ... def bar(self): |
>>> testmod(m1) | >>> testmod(m1, isprivate=is_private) | ... def bar(self): |
with m.__doc__. Private names are skipped. | with m.__doc__. Unless isprivate is specified, private names are not skipped. | def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None, report=True, optionflags=0): """m=None, name=None, globs=None, verbose=None, isprivate=None, report=True, optionflags=0 Test examples in docstrings in functions and classes reachable from module m (or the current module if m is not supplied), starting with m.__doc__. Private names are skipped. Also test examples reachable from dict m.__test__ if it exists and is not None. m.__dict__ maps names to functions, classes and strings; function and class docstrings are tested even if the name is private; strings are tested directly, as if they were docstrings. Return (#failures, #tests). See doctest.__doc__ for an overview. Optional keyword arg "name" gives the name of the module; by default use m.__name__. Optional keyword arg "globs" gives a dict to be used as the globals when executing examples; by default, use m.__dict__. A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. Optional keyword arg "verbose" prints lots of stuff if true, prints only failures if false; by default, it's true iff "-v" is in sys.argv. Optional keyword arg "isprivate" specifies a function used to determine whether a name is private. The default function is doctest.is_private; see its docs for details. Optional keyword arg "report" prints a summary at the end when true, else prints nothing at the end. In verbose mode, the summary is detailed, else very brief (in fact, empty if all tests passed). Optional keyword arg "optionflags" or's together module constants, and defaults to 0. This is new in 2.3. Possible values: DONT_ACCEPT_TRUE_FOR_1 By default, if an expected output block contains just "1", an actual output block containing just "True" is considered to be a match, and similarly for "0" versus "False". When DONT_ACCEPT_TRUE_FOR_1 is specified, neither substitution is allowed. Advanced tomfoolery: testmod runs methods of a local instance of class doctest.Tester, then merges the results into (or creates) global Tester instance doctest.master. Methods of doctest.master can be called directly too, if you want to do something unusual. Passing report=0 to testmod is especially useful then, to delay displaying a summary. Invoke doctest.master.summarize(verbose) when you're done fiddling. """ global master if m is None: import sys # DWA - m will still be None if this wasn't invoked from the command # line, in which case the following TypeError is about as good an error # as we should expect m = sys.modules.get('__main__') if not _ismodule(m): raise TypeError("testmod: module required; " + `m`) if name is None: name = m.__name__ tester = Tester(m, globs=globs, verbose=verbose, isprivate=isprivate, optionflags=optionflags) failures, tries = tester.rundoc(m, name) f, t = tester.rundict(m.__dict__, name, m) failures += f tries += t if hasattr(m, "__test__"): testdict = m.__test__ if testdict: if not hasattr(testdict, "items"): raise TypeError("testmod: module.__test__ must support " ".items(); " + `testdict`) f, t = tester.run__test__(testdict, name + ".__test__") failures += f tries += t if report: tester.summarize() if master is None: master = tester else: master.merge(tester) return failures, tries |
doctest.is_private; see its docs for details. | treat all functions as public. Optionally, "isprivate" can be set to doctest.is_private to skip over functions marked as private using the underscore naming convention; see its docs for details. | def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None, report=True, optionflags=0): """m=None, name=None, globs=None, verbose=None, isprivate=None, report=True, optionflags=0 Test examples in docstrings in functions and classes reachable from module m (or the current module if m is not supplied), starting with m.__doc__. Private names are skipped. Also test examples reachable from dict m.__test__ if it exists and is not None. m.__dict__ maps names to functions, classes and strings; function and class docstrings are tested even if the name is private; strings are tested directly, as if they were docstrings. Return (#failures, #tests). See doctest.__doc__ for an overview. Optional keyword arg "name" gives the name of the module; by default use m.__name__. Optional keyword arg "globs" gives a dict to be used as the globals when executing examples; by default, use m.__dict__. A copy of this dict is actually used for each docstring, so that each docstring's examples start with a clean slate. Optional keyword arg "verbose" prints lots of stuff if true, prints only failures if false; by default, it's true iff "-v" is in sys.argv. Optional keyword arg "isprivate" specifies a function used to determine whether a name is private. The default function is doctest.is_private; see its docs for details. Optional keyword arg "report" prints a summary at the end when true, else prints nothing at the end. In verbose mode, the summary is detailed, else very brief (in fact, empty if all tests passed). Optional keyword arg "optionflags" or's together module constants, and defaults to 0. This is new in 2.3. Possible values: DONT_ACCEPT_TRUE_FOR_1 By default, if an expected output block contains just "1", an actual output block containing just "True" is considered to be a match, and similarly for "0" versus "False". When DONT_ACCEPT_TRUE_FOR_1 is specified, neither substitution is allowed. Advanced tomfoolery: testmod runs methods of a local instance of class doctest.Tester, then merges the results into (or creates) global Tester instance doctest.master. Methods of doctest.master can be called directly too, if you want to do something unusual. Passing report=0 to testmod is especially useful then, to delay displaying a summary. Invoke doctest.master.summarize(verbose) when you're done fiddling. """ global master if m is None: import sys # DWA - m will still be None if this wasn't invoked from the command # line, in which case the following TypeError is about as good an error # as we should expect m = sys.modules.get('__main__') if not _ismodule(m): raise TypeError("testmod: module required; " + `m`) if name is None: name = m.__name__ tester = Tester(m, globs=globs, verbose=verbose, isprivate=isprivate, optionflags=optionflags) failures, tries = tester.rundoc(m, name) f, t = tester.rundict(m.__dict__, name, m) failures += f tries += t if hasattr(m, "__test__"): testdict = m.__test__ if testdict: if not hasattr(testdict, "items"): raise TypeError("testmod: module.__test__ must support " ".items(); " + `testdict`) f, t = tester.run__test__(testdict, name + ".__test__") failures += f tries += t if report: tester.summarize() if master is None: master = tester else: master.merge(tester) return failures, tries |
c, noise = noise[0], noise[1:] res = res + c return res + noise + line res = "" for line in map(addnoise, lines): b = binascii.a2b_base64(line) res = res + b verify(res == testdata) | self.fail("binascii.a2b_qp(**{1:1}) didn't raise TypeError") self.assertEqual(binascii.a2b_qp("= "), "") self.assertEqual(binascii.a2b_qp("=="), "=") self.assertEqual(binascii.a2b_qp("=AX"), "=AX") self.assertRaises(TypeError, binascii.b2a_qp, foo="bar") self.assertEqual(binascii.a2b_qp("=00\r\n=00"), "\x00\r\n\x00") self.assertEqual( binascii.b2a_qp("\xff\r\n\xff\n\xff"), "=FF\r\n=FF\r\n=FF" ) self.assertEqual( binascii.b2a_qp("0"*75+"\xff\r\n\xff\r\n\xff"), "0"*75+"=\r\n=FF\r\n=FF\r\n=FF" ) | def addnoise(line): noise = fillers ratio = len(line) // len(noise) res = "" while line and noise: if len(line) // len(noise) > ratio: c, line = line[0], line[1:] else: c, noise = noise[0], noise[1:] res = res + c return res + noise + line |
verify(binascii.a2b_base64(fillers) == '') | def test_main(): test_support.run_unittest(BinASCIITest) | def addnoise(line): noise = fillers ratio = len(line) // len(noise) res = "" while line and noise: if len(line) // len(noise) > ratio: c, line = line[0], line[1:] else: c, noise = noise[0], noise[1:] res = res + c return res + noise + line |
print "uu test" MAX_UU = 45 lines = [] for i in range(0, len(testdata), MAX_UU): b = testdata[i:i+MAX_UU] a = binascii.b2a_uu(b) lines.append(a) print a, res = "" for line in lines: b = binascii.a2b_uu(line) res = res + b verify(res == testdata) crc = binascii.crc32("Test the CRC-32 of") crc = binascii.crc32(" this string.", crc) if crc != 1571220330: print "binascii.crc32() failed." s = '{s\005\000\000\000worldi\002\000\000\000s\005\000\000\000helloi\001\000\000\0000' t = binascii.b2a_hex(s) u = binascii.a2b_hex(t) if s != u: print 'binascii hexlification failed' try: binascii.a2b_hex(t[:-1]) except TypeError: pass else: print 'expected TypeError not raised' try: binascii.a2b_hex(t[:-1] + 'q') except TypeError: pass else: print 'expected TypeError not raised' if have_unicode: verify(binascii.hexlify(unicode('a', 'ascii')) == '61', "hexlify failed for Unicode") try: binascii.a2b_qp("", **{1:1}) except TypeError: pass else: raise TestFailed, "binascii..a2b_qp(**{1:1}) didn't raise TypeError" | if __name__ == "__main__": test_main() | def addnoise(line): noise = fillers ratio = len(line) // len(noise) res = "" while line and noise: if len(line) // len(noise) > ratio: c, line = line[0], line[1:] else: c, noise = noise[0], noise[1:] res = res + c return res + noise + line |
def small(text): return '<small>' + text + '</small>' def strong(text): return '<strong>' + text + '</strong>' def grey(text): return '<font color=" | def small(text): if text: return '<small>' + text + '</small>' else: return '' def strong(text): if text: return '<strong>' + text + '</strong>' else: return '' def grey(text): if text: return '<font color=" else: return '' | def small(text): return '<small>' + text + '</small>' |
'<big><big><strong>%s</strong></big></big>' % str(etype), | '<big><big>%s</big></big>' % strong(pydoc.html.escape(str(etype))), | def html((etype, evalue, etb), context=5): """Return a nice HTML document describing a given traceback.""" import os, types, time, traceback, linecache, inspect, pydoc if type(etype) is types.ClassType: etype = etype.__name__ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable date = time.ctime(time.time()) head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading( '<big><big><strong>%s</strong></big></big>' % str(etype), '#ffffff', '#6622aa', pyver + '<br>' + date) + ''' |
function calls leading up to the error, in the order they occurred.''' | function calls leading up to the error, in the order they occurred.</p>''' | def html((etype, evalue, etb), context=5): """Return a nice HTML document describing a given traceback.""" import os, types, time, traceback, linecache, inspect, pydoc if type(etype) is types.ClassType: etype = etype.__name__ pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable date = time.ctime(time.time()) head = '<body bgcolor="#f0f0f8">' + pydoc.html.heading( '<big><big><strong>%s</strong></big></big>' % str(etype), '#ffffff', '#6622aa', pyver + '<br>' + date) + ''' |
frames.append('''<p> | frames.append(''' | def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 |
suffix_map = db.encodings_map | suffix_map = db.suffix_map | def init(files=None): global guess_extension, guess_type global suffix_map, types_map, encodings_map global inited inited = 1 db = MimeTypes() if files is None: files = knownfiles for file in files: if os.path.isfile(file): db.readfp(open(file)) encodings_map = db.encodings_map suffix_map = db.encodings_map types_map = db.types_map guess_extension = db.guess_extension guess_type = db.guess_type |
def main(): input = "MacTextEditor.h" output = SHORT + "gen.py" defsoutput = TOOLBOXDIR + LONG + ".py" scanner = MyScanner(input, output, defsoutput) scanner.scan() scanner.gentypetest(SHORT+"typetest.py") scanner.close() print "=== Done scanning and generating, now importing the generated code... ===" exec "import " + SHORT + "support" print "=== Done. It's up to you to compile it now! ===" | MODNAME = 'Mlte' | def main(): input = "MacTextEditor.h" output = SHORT + "gen.py" defsoutput = TOOLBOXDIR + LONG + ".py" scanner = MyScanner(input, output, defsoutput) scanner.scan() scanner.gentypetest(SHORT+"typetest.py") scanner.close() print "=== Done scanning and generating, now importing the generated code... ===" exec "import " + SHORT + "support" print "=== Done. It's up to you to compile it now! ===" |
class MyScanner(Scanner_OSX): | MODPREFIX = MODNAME INPUTFILE = string.lower(MODPREFIX) + 'gen.py' OUTPUTFILE = MODNAME + "module.c" | def main(): input = "MacTextEditor.h" output = SHORT + "gen.py" defsoutput = TOOLBOXDIR + LONG + ".py" scanner = MyScanner(input, output, defsoutput) scanner.scan() scanner.gentypetest(SHORT+"typetest.py") scanner.close() print "=== Done scanning and generating, now importing the generated code... ===" exec "import " + SHORT + "support" print "=== Done. It's up to you to compile it now! ===" |
def destination(self, type, name, arglist): classname = "Function" listname = "functions" if arglist: t, n, m = arglist[0] if t in OBJECTS and m == "InMode": classname = "Method" listname = t + "_methods" return classname, listname | from macsupport import * | def destination(self, type, name, arglist): classname = "Function" listname = "functions" if arglist: t, n, m = arglist[0] if t in OBJECTS and m == "InMode": classname = "Method" listname = t + "_methods" return classname, listname |
def writeinitialdefs(self): self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n") | def writeinitialdefs(self): self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n") |
|
def makeblacklistnames(self): return [ ] | includestuff = includestuff + """ | def makeblacklistnames(self): return [ ] |
def makegreylist(self): return [] | /* For now we declare them forward here. They'll go to mactoolbox later */ staticforward PyObject *TXNObj_New(TXNObject); staticforward int TXNObj_Convert(PyObject *, TXNObject *); staticforward PyObject *TXNFontMenuObj_New(TXNFontMenuObject); staticforward int TXNFontMenuObj_Convert(PyObject *, TXNFontMenuObject *); | def makegreylist(self): return [] |
def makeblacklisttypes(self): return [ "TXNTab", "TXNMargins", "TXNControlData", "TXNATSUIFeatures", "TXNATSUIVariations", "TXNAttributeData", "TXNTypeAttributes", "TXNMatchTextRecord", "TXNBackground", "UniChar", "TXNFindUPP", ] | // ADD declarations //extern PyObject *_CFTypeRefObj_New(CFTypeRef); //extern int _CFTypeRefObj_Convert(PyObject *, CFTypeRef *); | def makeblacklisttypes(self): return [ "TXNTab", # TBD "TXNMargins", # TBD "TXNControlData", #TBD "TXNATSUIFeatures", #TBD "TXNATSUIVariations", #TBD "TXNAttributeData", #TBD "TXNTypeAttributes", #TBD "TXNMatchTextRecord", #TBD "TXNBackground", #TBD "UniChar", #TBD "TXNFindUPP", ] |
def makerepairinstructions(self): return [ ([("void", "*", "OutMode"), ("ByteCount", "*", "InMode")], [("MlteInBuffer", "*", "InMode")]), ] if __name__ == "__main__": main() | // // /* ** Parse/generate ADD records */ """ initstuff = initstuff + """ // PyMac_INIT_TOOLBOX_OBJECT_NEW(xxxx); """ TXNObject = OpaqueByValueType("TXNObject", "TXNObj") TXNFontMenuObject = OpaqueByValueType("TXNFontMenuObject", "TXNFontMenuObj") TXNFrameID = Type("TXNFrameID", "l") TXNVersionValue = Type("TXNVersionValue", "l") TXNFeatureBits = Type("TXNFeatureBits", "l") TXNInitOptions = Type("TXNInitOptions", "l") TXNFrameOptions = Type("TXNFrameOptions", "l") TXNContinuousFlags = Type("TXNContinuousFlags", "l") TXNMatchOptions = Type("TXNMatchOptions", "l") TXNFileType = OSTypeType("TXNFileType") TXNFrameType = Type("TXNFrameType", "l") TXNDataType = OSTypeType("TXNDataType") TXNControlTag = OSTypeType("TXNControlTag") TXNActionKey = Type("TXNActionKey", "l") TXNTabType = Type("TXNTabType", "b") TXNScrollBarState = Type("TXNScrollBarState", "l") TXNOffset = Type("TXNOffset", "l") TXNObjectRefcon = FakeType("(TXNObjectRefcon)0") TXNErrors = OSErrType("TXNErrors", "l") TXNTypeRunAttributes = OSTypeType("TXNTypeRunAttributes") TXNTypeRunAttributeSizes = Type("TXNTypeRunAttributeSizes", "l") TXNPermanentTextEncodingType = Type("TXNPermanentTextEncodingType", "l") TXTNTag = OSTypeType("TXTNTag") TXNBackgroundType = Type("TXNBackgroundType", "l") DragReference = OpaqueByValueType("DragReference", "DragObj") DragTrackingMessage = Type("DragTrackingMessage", "h") RgnHandle = OpaqueByValueType("RgnHandle", "ResObj") GWorldPtr = OpaqueByValueType("GWorldPtr", "GWorldObj") MlteInBuffer = VarInputBufferType('void *', 'ByteCount', 'l') execfile("mltetypetest.py") class TXNObjDefinition(GlobalObjectDefinition): def outputCheckNewArg(self): Output("if (itself == NULL) return PyMac_Error(resNotFound);") class TXNFontMenuObjDefinition(GlobalObjectDefinition): def outputCheckNewArg(self): Output("if (itself == NULL) return PyMac_Error(resNotFound);") module = MacModule(MODNAME, MODPREFIX, includestuff, finalstuff, initstuff) TXNObject_object = TXNObjDefinition("TXNObject", "TXNObj", "TXNObject") TXNFontMenuObject_object = TXNFontMenuObjDefinition("TXNFontMenuObject", "TXNFontMenuObj", "TXNFontMenuObject") module.addobject(TXNObject_object) module.addobject(TXNFontMenuObject_object) Function = OSErrWeakLinkFunctionGenerator Method = OSErrWeakLinkMethodGenerator functions = [] TXNObject_methods = [] TXNFontMenuObject_methods = [] execfile(INPUTFILE) for f in functions: module.add(f) for f in TXNObject_methods: TXNObject_object.add(f) for f in TXNFontMenuObject_methods: TXNFontMenuObject_object.add(f) SetOutputFileName(OUTPUTFILE) module.generate() | def makerepairinstructions(self): return [ ([("void", "*", "OutMode"), ("ByteCount", "*", "InMode")], [("MlteInBuffer", "*", "InMode")]), ] |
cmdclass = {'build_ext':PyBuildExt}, | cmdclass = {'build_ext':PyBuildExt, 'install':PyBuildInstall}, | def main(): setup(name = 'Python standard library', version = '%d.%d' % sys.version_info[:2], cmdclass = {'build_ext':PyBuildExt}, # The struct module is defined here, because build_ext won't be # called unless there's at least one extension module defined. ext_modules=[Extension('struct', ['structmodule.c'])], # Scripts to install scripts = ['Tools/scripts/pydoc'] ) |
'__module__', '__name__']: return 0 | '__module__', '__name__', '__slots__']: return 0 | def visiblename(name, all=None): """Decide whether to show documentation on a variable.""" # Certain special names are redundant. if name in ['__builtins__', '__doc__', '__file__', '__path__', '__module__', '__name__']: return 0 # Private names are hidden, but special names are displayed. if name.startswith('__') and name.endswith('__'): return 1 if all is not None: # only document that which the programmer exported in __all__ return name in all else: return not name.startswith('_') |
def spillproperties(msg, attrs, predicate): | def spilldescriptors(msg, attrs, predicate): | def spillproperties(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self._docproperty(name, value, mod)) return attrs |
push(self._docproperty(name, value, mod)) | push(self._docdescriptor(name, value, mod)) | def spillproperties(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: push(self._docproperty(name, value, mod)) return attrs |
inspect.classify_class_attrs(object)) | classify_class_attrs(object)) | def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: base = self.docother(getattr(object, name), name, mod) if callable(value) or inspect.isdatadescriptor(value): doc = getattr(value, "__doc__", None) else: doc = None if doc is None: push('<dl><dt>%s</dl>\n' % base) else: doc = self.markup(getdoc(value), self.preformat, funcs, classes, mdict) doc = '<dd><tt>%s</tt>' % doc push('<dl><dt>%s%s</dl>\n' % (base, doc)) push('\n') return attrs |
attrs = spillproperties('Properties %s' % tag, attrs, lambda t: t[1] == 'property') | attrs = spilldescriptors('Data descriptors %s' % tag, attrs, lambda t: t[1] == 'data descriptor') | def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: base = self.docother(getattr(object, name), name, mod) if callable(value) or inspect.isdatadescriptor(value): doc = getattr(value, "__doc__", None) else: doc = None if doc is None: push('<dl><dt>%s</dl>\n' % base) else: doc = self.markup(getdoc(value), self.preformat, funcs, classes, mdict) doc = '<dd><tt>%s</tt>' % doc push('<dl><dt>%s%s</dl>\n' % (base, doc)) push('\n') return attrs |
def _docproperty(self, name, value, mod): | def _docdescriptor(self, name, value, mod): | def _docproperty(self, name, value, mod): results = [] push = results.append |
for attr, tag in [('fget', '<em>get</em>'), ('fset', '<em>set</em>'), ('fdel', '<em>delete</em>')]: func = getattr(value, attr) if func is not None: base = self.document(func, tag, mod) push('<dd>%s</dd>\n' % base) | def _docproperty(self, name, value, mod): results = [] push = results.append |
|
return self._docproperty(name, object, mod) | return self._docdescriptor(name, object, mod) | def docproperty(self, object, name=None, mod=None, cl=None): """Produce html documentation for a property.""" return self._docproperty(name, object, mod) |
inspect.classify_class_attrs(object)) | classify_class_attrs(object)) | def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: if callable(value) or inspect.isdatadescriptor(value): doc = getattr(value, "__doc__", None) else: doc = None push(self.docother(getattr(object, name), name, mod, 70, doc) + '\n') return attrs |
attrs = spillproperties("Properties %s:\n" % tag, attrs, lambda t: t[1] == 'property') | attrs = spilldescriptors("Data descriptors %s:\n" % tag, attrs, lambda t: t[1] == 'data descriptor') | def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: if callable(value) or inspect.isdatadescriptor(value): doc = getattr(value, "__doc__", None) else: doc = None push(self.docother(getattr(object, name), name, mod, 70, doc) + '\n') return attrs |
push(name) need_blank_after_doc = 0 | push(self.bold(name)) push('\n') | def _docproperty(self, name, value, mod): results = [] push = results.append |
need_blank_after_doc = 1 for attr, tag in [('fget', '<get>'), ('fset', '<set>'), ('fdel', '<delete>')]: func = getattr(value, attr) if func is not None: if need_blank_after_doc: push('') need_blank_after_doc = 0 base = self.document(func, tag, mod) push(self.indent(base)) return '\n'.join(results) | push('\n') return ''.join(results) | def _docproperty(self, name, value, mod): results = [] push = results.append |
return self._docproperty(name, object, mod) | return self._docdescriptor(name, object, mod) | def docproperty(self, object, name=None, mod=None, cl=None): """Produce text documentation for a property.""" return self._docproperty(name, object, mod) |
while not is_all_white(line): | while lineno > 0 and not is_all_white(line): | def find_paragraph(text, mark): lineno, col = map(int, string.split(mark, ".")) line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) while is_all_white(line): lineno = lineno + 1 line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) first_lineno = lineno while not is_all_white(line): lineno = lineno + 1 line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) last = "%d.0" % lineno # Search back to beginning of paragraph lineno = first_lineno - 1 line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) while not is_all_white(line): lineno = lineno - 1 line = text.get("%d.0" % lineno, "%d.0 lineend" % lineno) first = "%d.0" % (lineno+1) return first, last, text.get(first, last) |
zip file will be named 'base_dir' + ".zip". Uses either the InfoZIP "zip" utility (if installed and found on the default search path) or the "zipfile" Python module (if available). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. | zip file will be named 'base_dir' + ".zip". Uses either the "zipfile" Python module (if available) or the InfoZIP "zip" utility (if installed and found on the default search path). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. | def make_zipfile (base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the InfoZIP "zip" utility (if installed and found on the default search path) or the "zipfile" Python module (if available). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ # This initially assumed the Unix 'zip' utility -- but # apparently InfoZIP's zip.exe works the same under Windows, so # no changes needed! zip_filename = base_name + ".zip" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) try: spawn(["zip", "-rq", zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed" -- shouldn't try # again in the latter case. (I think fixing this will # require some cooperation from the spawn module -- perhaps # a utility function to search the path, so we can fallback # on zipfile.py without the failed spawn.) try: import zipfile except ImportError: raise DistutilsExecError, \ ("unable to create zip file '%s': " + "could neither find a standalone zip utility nor " + "import the 'zipfile' module") % zip_filename log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit (z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): z.write(path, path) if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) os.path.walk(base_dir, visit, z) z.close() return zip_filename |
try: import zipfile except ImportError: zipfile = None | def make_zipfile (base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the InfoZIP "zip" utility (if installed and found on the default search path) or the "zipfile" Python module (if available). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ # This initially assumed the Unix 'zip' utility -- but # apparently InfoZIP's zip.exe works the same under Windows, so # no changes needed! zip_filename = base_name + ".zip" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) try: spawn(["zip", "-rq", zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed" -- shouldn't try # again in the latter case. (I think fixing this will # require some cooperation from the spawn module -- perhaps # a utility function to search the path, so we can fallback # on zipfile.py without the failed spawn.) try: import zipfile except ImportError: raise DistutilsExecError, \ ("unable to create zip file '%s': " + "could neither find a standalone zip utility nor " + "import the 'zipfile' module") % zip_filename log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit (z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): z.write(path, path) if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) os.path.walk(base_dir, visit, z) z.close() return zip_filename |
|
try: spawn(["zip", "-rq", zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: | def make_zipfile (base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the InfoZIP "zip" utility (if installed and found on the default search path) or the "zipfile" Python module (if available). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ # This initially assumed the Unix 'zip' utility -- but # apparently InfoZIP's zip.exe works the same under Windows, so # no changes needed! zip_filename = base_name + ".zip" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) try: spawn(["zip", "-rq", zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed" -- shouldn't try # again in the latter case. (I think fixing this will # require some cooperation from the spawn module -- perhaps # a utility function to search the path, so we can fallback # on zipfile.py without the failed spawn.) try: import zipfile except ImportError: raise DistutilsExecError, \ ("unable to create zip file '%s': " + "could neither find a standalone zip utility nor " + "import the 'zipfile' module") % zip_filename log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit (z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): z.write(path, path) if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) os.path.walk(base_dir, visit, z) z.close() return zip_filename |
|
if zipfile is None: if verbose: zipoptions = "-r" else: zipoptions = "-rq" | def make_zipfile (base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the InfoZIP "zip" utility (if installed and found on the default search path) or the "zipfile" Python module (if available). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ # This initially assumed the Unix 'zip' utility -- but # apparently InfoZIP's zip.exe works the same under Windows, so # no changes needed! zip_filename = base_name + ".zip" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) try: spawn(["zip", "-rq", zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed" -- shouldn't try # again in the latter case. (I think fixing this will # require some cooperation from the spawn module -- perhaps # a utility function to search the path, so we can fallback # on zipfile.py without the failed spawn.) try: import zipfile except ImportError: raise DistutilsExecError, \ ("unable to create zip file '%s': " + "could neither find a standalone zip utility nor " + "import the 'zipfile' module") % zip_filename log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit (z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): z.write(path, path) if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) os.path.walk(base_dir, visit, z) z.close() return zip_filename |
|
import zipfile except ImportError: | spawn(["zip", zipoptions, zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: | def make_zipfile (base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the InfoZIP "zip" utility (if installed and found on the default search path) or the "zipfile" Python module (if available). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ # This initially assumed the Unix 'zip' utility -- but # apparently InfoZIP's zip.exe works the same under Windows, so # no changes needed! zip_filename = base_name + ".zip" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) try: spawn(["zip", "-rq", zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed" -- shouldn't try # again in the latter case. (I think fixing this will # require some cooperation from the spawn module -- perhaps # a utility function to search the path, so we can fallback # on zipfile.py without the failed spawn.) try: import zipfile except ImportError: raise DistutilsExecError, \ ("unable to create zip file '%s': " + "could neither find a standalone zip utility nor " + "import the 'zipfile' module") % zip_filename log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit (z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): z.write(path, path) if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) os.path.walk(base_dir, visit, z) z.close() return zip_filename |
("unable to create zip file '%s': " + "could neither find a standalone zip utility nor " + "import the 'zipfile' module") % zip_filename | ("unable to create zip file '%s': " "could neither import the 'zipfile' module nor " "find a standalone zip utility") % zip_filename | def make_zipfile (base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the InfoZIP "zip" utility (if installed and found on the default search path) or the "zipfile" Python module (if available). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ # This initially assumed the Unix 'zip' utility -- but # apparently InfoZIP's zip.exe works the same under Windows, so # no changes needed! zip_filename = base_name + ".zip" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) try: spawn(["zip", "-rq", zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed" -- shouldn't try # again in the latter case. (I think fixing this will # require some cooperation from the spawn module -- perhaps # a utility function to search the path, so we can fallback # on zipfile.py without the failed spawn.) try: import zipfile except ImportError: raise DistutilsExecError, \ ("unable to create zip file '%s': " + "could neither find a standalone zip utility nor " + "import the 'zipfile' module") % zip_filename log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit (z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): z.write(path, path) if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) os.path.walk(base_dir, visit, z) z.close() return zip_filename |
log.info("creating '%s' and adding '%s' to it", | else: log.info("creating '%s' and adding '%s' to it", | def make_zipfile (base_name, base_dir, verbose=0, dry_run=0): """Create a zip file from all the files under 'base_dir'. The output zip file will be named 'base_dir' + ".zip". Uses either the InfoZIP "zip" utility (if installed and found on the default search path) or the "zipfile" Python module (if available). If neither tool is available, raises DistutilsExecError. Returns the name of the output zip file. """ # This initially assumed the Unix 'zip' utility -- but # apparently InfoZIP's zip.exe works the same under Windows, so # no changes needed! zip_filename = base_name + ".zip" mkpath(os.path.dirname(zip_filename), dry_run=dry_run) try: spawn(["zip", "-rq", zip_filename, base_dir], dry_run=dry_run) except DistutilsExecError: # XXX really should distinguish between "couldn't find # external 'zip' command" and "zip failed" -- shouldn't try # again in the latter case. (I think fixing this will # require some cooperation from the spawn module -- perhaps # a utility function to search the path, so we can fallback # on zipfile.py without the failed spawn.) try: import zipfile except ImportError: raise DistutilsExecError, \ ("unable to create zip file '%s': " + "could neither find a standalone zip utility nor " + "import the 'zipfile' module") % zip_filename log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir) def visit (z, dirname, names): for name in names: path = os.path.normpath(os.path.join(dirname, name)) if os.path.isfile(path): z.write(path, path) if not dry_run: z = zipfile.ZipFile(zip_filename, "w", compression=zipfile.ZIP_DEFLATED) os.path.walk(base_dir, visit, z) z.close() return zip_filename |
exts.append( Extension('_md5', ['md5module.c', 'md5c.c']) ) | exts.append( Extension('_md5', ['md5module.c', 'md5.c']) ) | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
for fd, obj in map.iteritems(): | for fd, obj in map.items(): | def poll (timeout=0.0, map=None): if map is None: map = socket_map if map: r = []; w = []; e = [] for fd, obj in map.iteritems(): if obj.readable(): r.append(fd) if obj.writable(): w.append(fd) try: r, w, e = select.select(r, w, e, timeout) except select.error, err: if err[0] != EINTR: raise for fd in r: obj = map.get(fd) if obj is None: continue read(obj) for fd in w: obj = map.get(fd) if obj is None: continue write(obj) |
for fd, obj in map.iteritems(): | for fd, obj in map.items(): | def poll2 (timeout=0.0, map=None): import poll if map is None: map=socket_map if timeout is not None: # timeout is in milliseconds timeout = int(timeout*1000) if map: l = [] for fd, obj in map.iteritems(): flags = 0 if obj.readable(): flags = poll.POLLIN if obj.writable(): flags = flags | poll.POLLOUT if flags: l.append ((fd, flags)) r = poll.poll (l, timeout) for fd, flags in r: obj = map.get(fd) if obj is None: continue readwrite(obj, flags) |
for fd, obj in map.iteritems(): | for fd, obj in map.items(): | def poll3 (timeout=0.0, map=None): # Use the poll() support added to the select module in Python 2.0 if map is None: map=socket_map if timeout is not None: # timeout is in milliseconds timeout = int(timeout*1000) pollster = select.poll() if map: for fd, obj in map.iteritems(): flags = 0 if obj.readable(): flags = select.POLLIN if obj.writable(): flags = flags | select.POLLOUT if flags: pollster.register(fd, flags) try: r = pollster.poll (timeout) except select.error, err: if err[0] != EINTR: raise r = [] for fd, flags in r: obj = map.get(fd) if obj is None: continue readwrite(obj, flags) |
try: | if old: | def __calc_date_time(self): # Set self.__date_time, self.__date, & self.__time by using # time.strftime(). |
except ValueError: pass | def __calc_date_time(self): # Set self.__date_time, self.__date, & self.__time by using # time.strftime(). |
|
if (self.compiler.find_library_file(lib_dirs, 'ncurses')): | if (self.compiler.find_library_file(lib_dirs, 'ncursesw')): curses_libs = ['ncursesw'] exts.append( Extension('_curses', ['_cursesmodule.c'], libraries = curses_libs) ) elif (self.compiler.find_library_file(lib_dirs, 'ncurses')): | def detect_modules(self): # Ensure that /usr/local is always used add_dir_to_list(self.compiler.library_dirs, '/usr/local/lib') add_dir_to_list(self.compiler.include_dirs, '/usr/local/include') |
contents.append(self.docother(value, key, name, 70)) | contents.append(self.docother(value, key, name, maxlen=70)) | def docmodule(self, object, name=None, mod=None): """Produce text documentation for a given module object.""" name = object.__name__ # ignore the passed-in name synop, desc = splitdoc(getdoc(object)) result = self.section('NAME', name + (synop and ' - ' + synop)) |
name, mod, 70, doc) + '\n') | name, mod, maxlen=70, doc=doc) + '\n') | def spilldata(msg, attrs, predicate): ok, attrs = _split_list(attrs, predicate) if ok: hr.maybe() push(msg) for name, kind, homecls, value in ok: if callable(value) or inspect.isdatadescriptor(value): doc = getdoc(value) else: doc = None push(self.docother(getattr(object, name), name, mod, 70, doc) + '\n') return attrs |
def docother(self, object, name=None, mod=None, maxlen=None, doc=None): | def docother(self, object, name=None, mod=None, parent=None, maxlen=None, doc=None): | def docother(self, object, name=None, mod=None, maxlen=None, doc=None): """Produce text documentation for a data object.""" repr = self.repr(object) if maxlen: line = (name and name + ' = ' or '') + repr chop = maxlen - len(line) if chop < 0: repr = repr[:chop] + '...' line = (name and self.bold(name) + ' = ' or '') + repr if doc is not None: line += '\n' + self.indent(str(doc)) return line |
def test_close_fds(self): os.pipe() p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' \ 'sys.stdout.write(str(os.dup(0)))'], stdout=subprocess.PIPE, close_fds=1) self.assertEqual(p.stdout.read(), "3") | def test_preexec(self): # preexec function p = subprocess.Popen([sys.executable, "-c", 'import sys,os;' \ 'sys.stdout.write(os.getenv("FRUIT"))'], stdout=subprocess.PIPE, preexec_fn=lambda: os.putenv("FRUIT", "apple")) self.assertEqual(p.stdout.read(), "apple") |
|
value = unicode(value[2], value[0]).encode("ascii") | param += '*' value = Utils.encode_rfc2231(value[2], value[0], value[1]) | def _formatparam(param, value=None, quote=1): """Convenience function to format and return a key=value pair. This will quote the value if needed or if quote is true. """ if value is not None and len(value) > 0: # TupleType is used for RFC 2231 encoded parameter values where items # are (charset, language, value). charset is a string, not a Charset # instance. if isinstance(value, TupleType): # Convert to ascii, ignore language value = unicode(value[2], value[0]).encode("ascii") # BAW: Please check this. I think that if quote is set it should # force quoting even if not necessary. if quote or tspecials.search(value): return '%s="%s"' % (param, Utils.quote(value)) else: return '%s=%s' % (param, value) else: return param |
def set_param(self, param, value, header='Content-Type', requote=1): | def set_param(self, param, value, header='Content-Type', requote=1, charset=None, language=''): | def set_param(self, param, value, header='Content-Type', requote=1): """Set a parameter in the Content-Type: header. |
""" | If charset is specified the parameter will be encoded according to RFC 2231. In this case language is optional. """ if not isinstance(value, TupleType) and charset: value = (charset, language, value) | def set_param(self, param, value, header='Content-Type', requote=1): """Set a parameter in the Content-Type: header. |
raise SystemError,\ 'module "%s.%s" failed to register' % \ (__name__,modname) | raise CodecRegistryError,\ 'module "%s" (%s) failed to register' % \ (mod.__name__, mod.__file__) | def search_function(encoding): # Cache lookup entry = _cache.get(encoding,_unknown) if entry is not _unknown: return entry # Import the module modname = encoding.replace('-', '_') modname = aliases.aliases.get(modname,modname) try: mod = __import__(modname,globals(),locals(),'*') except ImportError,why: # cache misses _cache[encoding] = None return None # Now ask the module for the registry entry try: entry = tuple(mod.getregentry()) except AttributeError: entry = () if len(entry) != 4: raise SystemError,\ 'module "%s.%s" failed to register' % \ (__name__,modname) for obj in entry: if not callable(obj): raise SystemError,\ 'incompatible codecs in module "%s.%s"' % \ (__name__,modname) # Cache the codec registry entry _cache[encoding] = entry # Register its aliases (without overwriting previously registered # aliases) try: codecaliases = mod.getaliases() except AttributeError: pass else: for alias in codecaliases: if not aliases.aliases.has_key(alias): aliases.aliases[alias] = modname # Return the registry entry return entry |
raise SystemError,\ 'incompatible codecs in module "%s.%s"' % \ (__name__,modname) | raise CodecRegistryError,\ 'incompatible codecs in module "%s" (%s)' % \ (mod.__name__, mod.__file__) | def search_function(encoding): # Cache lookup entry = _cache.get(encoding,_unknown) if entry is not _unknown: return entry # Import the module modname = encoding.replace('-', '_') modname = aliases.aliases.get(modname,modname) try: mod = __import__(modname,globals(),locals(),'*') except ImportError,why: # cache misses _cache[encoding] = None return None # Now ask the module for the registry entry try: entry = tuple(mod.getregentry()) except AttributeError: entry = () if len(entry) != 4: raise SystemError,\ 'module "%s.%s" failed to register' % \ (__name__,modname) for obj in entry: if not callable(obj): raise SystemError,\ 'incompatible codecs in module "%s.%s"' % \ (__name__,modname) # Cache the codec registry entry _cache[encoding] = entry # Register its aliases (without overwriting previously registered # aliases) try: codecaliases = mod.getaliases() except AttributeError: pass else: for alias in codecaliases: if not aliases.aliases.has_key(alias): aliases.aliases[alias] = modname # Return the registry entry return entry |
data.byteswap() | if big_endian: data.byteswap() | def readframes(self, nframes): if self._data_seek_needed: self._data_chunk.rewind() pos = self._soundpos * self._framesize if pos: self._data_chunk.setpos(pos) self._data_seek_needed = 0 if nframes == 0: return '' if self._sampwidth > 1: # unfortunately the fromfile() method does not take # something that only looks like a file object, so # we have to reach into the innards of the chunk object import array data = array.array(_array_fmts[self._sampwidth]) nitems = nframes * self._nchannels if nitems * self._sampwidth > self._data_chunk.chunksize - self._data_chunk.size_read: nitems = (self._data_chunk.chunksize - self._data_chunk.size_read) / self._sampwidth data.fromfile(self._data_chunk.file, nitems) self._data_chunk.size_read = self._data_chunk.size_read + nitems * self._sampwidth data.byteswap() data = data.tostring() else: data = self._data_chunk.read(nframes * self._framesize) if self._convert and data: data = self._convert(data) self._soundpos = self._soundpos + len(data) / (self._nchannels * self._sampwidth) return data |
data.byteswap() | if big_endian: data.byteswap() | def writeframesraw(self, data): self._ensure_header_written(len(data)) nframes = len(data) / (self._sampwidth * self._nchannels) if self._convert: data = self._convert(data) if self._sampwidth > 1: import array data = array.array(_array_fmts[self._sampwidth], data) data.byteswap() data.tofile(self._file) self._datawritten = self._datawritten + len(data) * self._sampwidth else: self._file.write(data) self._datawritten = self._datawritten + len(data) self._nframeswritten = self._nframeswritten + nframes |
_write_long(36 + self._datawritten) | _write_long(self._file, 36 + self._datawritten) | def _patchheader(self): if self._datawritten == self._datalength: return curpos = self._file.tell() self._file.seek(self._form_length_pos, 0) _write_long(36 + self._datawritten) self._file.seek(self._data_length_pos, 0) _write_long(self._file, self._datawritten) self._file.seek(curpos, 0) self._datalength = self._datawritten |
self._dist_path(rpms[0]) | self._dist_path(rpms[0])) | def run (self): |
self._dist_path(debuginfo[0]) | self._dist_path(debuginfo[0])) | def run (self): |
print "Failed", name | print "Passed", name | def confirm(outcome, name): global tests tests = tests + 1 if outcome: if verbose: print "Failed", name else: failures.append(name) |
EventHandlerUPP event; | # def outputFreeIt(self, name): |
|
event = NewEventHandlerUPP(CarbonEvents_HandleCommand); _err = InstallEventHandler(_self->ob_itself, event, 1, &inSpec, (void *)callback, &outRef); | _err = InstallEventHandler(_self->ob_itself, gEventHandlerUPP, 1, &inSpec, (void *)callback, &outRef); | # def outputFreeIt(self, name): |
return Py_BuildValue("l", outRef); """ | return Py_BuildValue("O&", EventHandlerRef_New, outRef);""" | # def outputFreeIt(self, name): |
printf("lock failure\n"); | printf("lock failure\\n"); | # def outputFreeIt(self, name): |
SetOutputFileName('_CarbonEvt.c') | SetOutputFileName('_CarbonEvtmodule.c') | # def outputFreeIt(self, name): |
if self.groupdict.has_key(name): raise error, "can only use each group name once" | ogid = self.groupdict.get(name, None) if ogid is not None: raise error, ("redefinition of group name %s as group %d; " + "was group %d") % (`name`, gid, ogid) | def opengroup(self, name=None): gid = self.groups self.groups = gid + 1 if name: if self.groupdict.has_key(name): raise error, "can only use each group name once" self.groupdict[name] = gid self.open.append(gid) return gid |
def test_mktime(self): self.assertRaises(OverflowError, time.mktime, (999999, 999999, 999999, 999999, 999999, 999999, 999999, 999999, 999999)) | def test_mktime(self): self.assertRaises(OverflowError, time.mktime, (999999, 999999, 999999, 999999, 999999, 999999, 999999, 999999, 999999)) |
|
return isabs(splitdrive(path)[1]) | p = splitdrive(path)[1] return len(p)==1 and p[0] in '/\\' | def ismount(path): """Test whether a path is a mount point""" return isabs(splitdrive(path)[1]) |
if ' raise BuildError, "BuildApplet could destroy your sourcefile on OSX, please rename: %s" % filename | def process(template, filename, destname, copy_codefragment, rsrcname=None, others=[], raw=0, progress="default"): if progress == "default": progress = EasyDialogs.ProgressBar("Processing %s..."%os.path.split(filename)[1], 120) progress.label("Compiling...") progress.inc(0) # Read the source and compile it # (there's no point overwriting the destination if it has a syntax error) fp = open(filename, 'rU') text = fp.read() fp.close() try: code = compile(text, filename, "exec") except SyntaxError, arg: raise BuildError, "Syntax error in script %s: %s" % (filename, arg) except EOFError: raise BuildError, "End-of-file in script %s" % (filename,) # Set the destination file name. Note that basename # does contain the whole filepath, only a .py is stripped. if string.lower(filename[-3:]) == ".py": basename = filename[:-3] if MacOS.runtimemodel != 'macho' and not destname: destname = basename else: basename = filename if not destname: if MacOS.runtimemodel == 'macho': destname = basename + '.app' else: destname = basename + '.applet' if not rsrcname: rsrcname = basename + '.rsrc' # Try removing the output file. This fails in MachO, but it should # do any harm. try: os.remove(destname) except os.error: pass process_common(template, progress, code, rsrcname, destname, 0, copy_codefragment, raw, others) |
|
f = ManualGenerator("SetControlData_Callback", setcontroldata_callback_body, condition=" | f = ManualGenerator("SetControlData_Callback", setcontroldata_callback_body); | def outputCleanupStructMembers(self): Output("Py_XDECREF(self->ob_callbackdict);") Output("if (self->ob_itself)SetControlReference(self->ob_itself, (long)0); /* Make it forget about us */") |
dbfile = s.optiondb()['DBFILE'] | dbfile = s.optiondb().get('DBFILE') | def build(master=None, initialcolor=None, initfile=None, ignore=None, dbfile=None): # create all output widgets s = Switchboard(not ignore and initfile) # defer to the command line chosen color database, falling back to the one # in the .pynche file. if dbfile is None: dbfile = s.optiondb()['DBFILE'] # find a parseable color database colordb = None files = RGB_TXT[:] while colordb is None: try: colordb = ColorDB.get_colordb(dbfile) except (KeyError, IOError): pass if colordb is None: if not files: break dbfile = files.pop(0) if not colordb: usage(1, 'No color database file found, see the -d option.') s.set_colordb(colordb) # create the application window decorations app = PyncheWidget(__version__, s, master=master) w = app.window() # these built-in viewers live inside the main Pynche window s.add_view(StripViewer(s, w)) s.add_view(ChipViewer(s, w)) s.add_view(TypeinViewer(s, w)) # get the initial color as components and set the color on all views. if # there was no initial color given on the command line, use the one that's # stored in the option database if initialcolor is None: optiondb = s.optiondb() red = optiondb.get('RED') green = optiondb.get('GREEN') blue = optiondb.get('BLUE') # but if there wasn't any stored in the database, use grey50 if red is None or blue is None or green is None: red, green, blue = initial_color('grey50', colordb) else: red, green, blue = initial_color(initialcolor, colordb) s.update_views(red, green, blue) return app, s |
return Message(**options).show() | res = Message(**options).show() if isinstance(res, bool): if res: return YES return NO return res | def _show(title=None, message=None, icon=None, type=None, **options): if icon: options["icon"] = icon if type: options["type"] = type if title: options["title"] = title if message: options["message"] = message return Message(**options).show() |
print 'source', `class_tcl` | def readprofile(self, baseName, className): """Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if such a file exists in the home directory.""" import os if os.environ.has_key('HOME'): home = os.environ['HOME'] else: home = os.curdir class_tcl = os.path.join(home, '.%s.tcl' % className) class_py = os.path.join(home, '.%s.py' % className) base_tcl = os.path.join(home, '.%s.tcl' % baseName) base_py = os.path.join(home, '.%s.py' % baseName) dir = {'self': self} exec 'from Tkinter import *' in dir if os.path.isfile(class_tcl): print 'source', `class_tcl` self.tk.call('source', class_tcl) if os.path.isfile(class_py): print 'execfile', `class_py` execfile(class_py, dir) if os.path.isfile(base_tcl): print 'source', `base_tcl` self.tk.call('source', base_tcl) if os.path.isfile(base_py): print 'execfile', `base_py` execfile(base_py, dir) |
|
print 'execfile', `class_py` | def readprofile(self, baseName, className): """Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if such a file exists in the home directory.""" import os if os.environ.has_key('HOME'): home = os.environ['HOME'] else: home = os.curdir class_tcl = os.path.join(home, '.%s.tcl' % className) class_py = os.path.join(home, '.%s.py' % className) base_tcl = os.path.join(home, '.%s.tcl' % baseName) base_py = os.path.join(home, '.%s.py' % baseName) dir = {'self': self} exec 'from Tkinter import *' in dir if os.path.isfile(class_tcl): print 'source', `class_tcl` self.tk.call('source', class_tcl) if os.path.isfile(class_py): print 'execfile', `class_py` execfile(class_py, dir) if os.path.isfile(base_tcl): print 'source', `base_tcl` self.tk.call('source', base_tcl) if os.path.isfile(base_py): print 'execfile', `base_py` execfile(base_py, dir) |
|
print 'source', `base_tcl` | def readprofile(self, baseName, className): """Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if such a file exists in the home directory.""" import os if os.environ.has_key('HOME'): home = os.environ['HOME'] else: home = os.curdir class_tcl = os.path.join(home, '.%s.tcl' % className) class_py = os.path.join(home, '.%s.py' % className) base_tcl = os.path.join(home, '.%s.tcl' % baseName) base_py = os.path.join(home, '.%s.py' % baseName) dir = {'self': self} exec 'from Tkinter import *' in dir if os.path.isfile(class_tcl): print 'source', `class_tcl` self.tk.call('source', class_tcl) if os.path.isfile(class_py): print 'execfile', `class_py` execfile(class_py, dir) if os.path.isfile(base_tcl): print 'source', `base_tcl` self.tk.call('source', base_tcl) if os.path.isfile(base_py): print 'execfile', `base_py` execfile(base_py, dir) |
|
print 'execfile', `base_py` | def readprofile(self, baseName, className): """Internal function. It reads BASENAME.tcl and CLASSNAME.tcl into the Tcl Interpreter and calls execfile on BASENAME.py and CLASSNAME.py if such a file exists in the home directory.""" import os if os.environ.has_key('HOME'): home = os.environ['HOME'] else: home = os.curdir class_tcl = os.path.join(home, '.%s.tcl' % className) class_py = os.path.join(home, '.%s.py' % className) base_tcl = os.path.join(home, '.%s.tcl' % baseName) base_py = os.path.join(home, '.%s.py' % baseName) dir = {'self': self} exec 'from Tkinter import *' in dir if os.path.isfile(class_tcl): print 'source', `class_tcl` self.tk.call('source', class_tcl) if os.path.isfile(class_py): print 'execfile', `class_py` execfile(class_py, dir) if os.path.isfile(base_tcl): print 'source', `base_tcl` self.tk.call('source', base_tcl) if os.path.isfile(base_py): print 'execfile', `base_py` execfile(base_py, dir) |
|
_copy_reg.pickle(stat_result, _pickle_stat_result,_make_stat_result) | try: _copy_reg.pickle(stat_result, _pickle_stat_result, _make_stat_result) except NameError: pass | def _pickle_stat_result(sr): (type, args) = sr.__reduce__() return (_make_stat_result, args) |
print "Failing syntax tree:" pprint.pprint(t) raise | raise TestFailed, s | def roundtrip(f, s): st1 = f(s) t = st1.totuple() try: st2 = parser.sequence2ast(t) except parser.ParserError: print "Failing syntax tree:" pprint.pprint(t) raise |
roundtrip(suite, open(filename).read()) | roundtrip(parser.suite, open(filename).read()) | def roundtrip_fromfile(filename): roundtrip(suite, open(filename).read()) |
roundtrip(expr, s) | roundtrip(parser.expr, s) | def test_expr(s): print "expr:", s roundtrip(expr, s) |
roundtrip(suite, s) | roundtrip(parser.suite, s) | def test_suite(s): print "suite:", s roundtrip(suite, s) |
sequence2ast(tree) | parser.sequence2ast(tree) | def check_bad_tree(tree, label): print print label try: sequence2ast(tree) except parser.ParserError: print "caught expected exception for invalid tree" pass else: print "test failed: did not properly detect invalid tree:" pprint.pprint(tree) |
pass | def check_bad_tree(tree, label): print print label try: sequence2ast(tree) except parser.ParserError: print "caught expected exception for invalid tree" pass else: print "test failed: did not properly detect invalid tree:" pprint.pprint(tree) |
|
type, address = splittype(address) if not type: | import re if not re.match('^([^/:]+)://', address): | def getproxies_registry(): """Return a dictionary of scheme -> proxy server URL mappings. |
def __init__(self): | def __init__(self, allow_none): | def __init__(self): self.funcs = {} self.instance = None |
response = xmlrpclib.dumps(response, methodresponse=1) | response = xmlrpclib.dumps(response, methodresponse=1, allow_none = self.allow_none) | def _marshaled_dispatch(self, data, dispatch_method = None): """Dispatches an XML-RPC method from marshalled (XML) data. |
logRequests=1): | logRequests=1, allow_none=False): | def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler, logRequests=1): self.logRequests = logRequests |
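
The last three rows above belong to a single change: an allow_none flag is added to the SimpleXMLRPCServer constructor, stored on the dispatcher, and passed through to xmlrpclib.dumps so that None return values can be marshalled as <nil/>. A minimal usage sketch under that reading; the import path, address and registered function below are illustrative assumptions, not part of the diff data:

    from SimpleXMLRPCServer import SimpleXMLRPCServer

    # allow_none=True is the keyword introduced in the last row; with it,
    # methods that return None are marshalled as <nil/> instead of raising.
    server = SimpleXMLRPCServer(('localhost', 8000), logRequests=1, allow_none=True)
    server.register_function(lambda: None, 'noop')  # illustrative registration
    server.serve_forever()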