Dataset columns (name, type, value-length range):

  repository_name              stringlengths    5 to 67
  func_path_in_repository      stringlengths    4 to 234
  func_name                    stringlengths    0 to 314
  whole_func_string            stringlengths    52 to 3.87M
  language                     stringclasses    6 values
  func_code_string             stringlengths    52 to 3.87M
  func_documentation_string    stringlengths    1 to 47.2k
  func_code_url                stringlengths    85 to 339
zimeon/iiif
iiif/auth.py
IIIFAuth.access_token_valid
def access_token_valid(self, token, log_msg):
    """Check token validity.

    Returns true if the token is valid. The set of allowed access
    tokens is stored in self.access_tokens.

    Uses log_msg as prefix to info level log message of acceptance
    or rejection.
    """
    if (token in self.access_tokens):
        (cookie, issue_time) = self.access_tokens[token]
        age = int(time.time()) - issue_time
        if (age <= (self.access_token_lifetime + 1)):
            self.logger.info(log_msg + " " + token +
                             " ACCEPTED TOKEN (%ds old)" % age)
            return True
        # Expired...
        self.logger.info(log_msg + " " + token +
                         " EXPIRED TOKEN (%ds old > %ds)" %
                         (age, self.access_token_lifetime))
        # Keep token for 2x lifetime in order to generate
        # helpful expired message
        if (age > (self.access_token_lifetime * 2)):
            del self.access_tokens[token]
        return False
    else:
        self.logger.info(log_msg + " " + token + " REJECTED TOKEN")
        return False
python
Check token validity. Returns true if the token is valid. The set of allowed access tokens is stored in self.access_tokens. Uses log_msg as prefix to info level log message of acceptance or rejection.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth.py#L271-L298
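The expiry logic above is self-contained enough to demonstrate in isolation. A minimal sketch, assuming only the token -> (cookie, issue_time) mapping and the lifetime rules stated in the docstring; the names below are illustrative, not part of the iiif library API:

import time

access_tokens = {}            # token -> (cookie, issue_time), as in self.access_tokens
access_token_lifetime = 3600  # seconds; hypothetical configuration value

def issue(token, cookie):
    access_tokens[token] = (cookie, int(time.time()))

def valid(token):
    if token not in access_tokens:
        return False  # rejected: never issued
    cookie, issue_time = access_tokens[token]
    age = int(time.time()) - issue_time
    if age <= access_token_lifetime + 1:
        return True   # accepted
    if age > access_token_lifetime * 2:
        # Drop only after 2x lifetime, so a helpful "expired"
        # message stays possible in the meantime
        del access_tokens[token]
    return False      # expired

issue('abc', 'cookie-value')
assert valid('abc')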
zimeon/iiif
iiif/auth_flask.py
IIIFAuthFlask.info_authn
def info_authn(self):
    """Check to see if user is authenticated for info.json.

    Must have Authorization header with value that has the form
    "Bearer TOKEN", where TOKEN is an appropriate and valid access
    token.
    """
    authz_header = request.headers.get('Authorization', '[none]')
    if (not authz_header.startswith('Bearer ')):
        return False
    token = authz_header[7:]
    return self.access_token_valid(
        token, "info_authn: Authorization header")
python
Check to see if user is authenticated for info.json. Must have Authorization header with value that has the form "Bearer TOKEN", where TOKEN is an appropriate and valid access token.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_flask.py#L23-L35
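Bearer-token extraction as done in info_authn can be checked without a Flask request object. A hedged sketch using a plain dict in place of request.headers; the 7-character slice matches the "Bearer " prefix used above:

def extract_bearer_token(headers):
    """Return the token from an Authorization header, or None."""
    authz = headers.get('Authorization', '[none]')
    if not authz.startswith('Bearer '):
        return None
    return authz[7:]  # strip the "Bearer " prefix

assert extract_bearer_token({'Authorization': 'Bearer tok123'}) == 'tok123'
assert extract_bearer_token({}) is None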
zimeon/iiif
iiif/auth_flask.py
IIIFAuthFlask.image_authn
def image_authn(self):
    """Check to see if user is authenticated for image requests.

    Must have access cookie with an appropriate value.
    """
    authn_cookie = request.cookies.get(
        self.access_cookie_name, default='[none]')
    return self.access_cookie_valid(authn_cookie,
                                    "image_authn: auth cookie")
python
Check to see if user is authenticated for image requests. Must have access cookie with an appropriate value.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_flask.py#L37-L44
zimeon/iiif
iiif/auth_flask.py
IIIFAuthFlask.logout_handler
def logout_handler(self, **args):
    """Handler for logout button.

    Delete cookies and return HTML that immediately closes window
    """
    response = make_response(
        "<html><script>window.close();</script></html>", 200,
        {'Content-Type': "text/html"})
    response.set_cookie(self.account_cookie_name, expires=0)
    response.set_cookie(self.access_cookie_name, expires=0)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
python
Handler for logout button. Delete cookies and return HTML that immediately closes window
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_flask.py#L57-L68
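The cookie-clearing pattern in logout_handler is standard Flask. A minimal standalone sketch under the assumption of hypothetical cookie names ('account' and 'access' stand in for self.account_cookie_name and self.access_cookie_name):

from flask import Flask, make_response

app = Flask(__name__)

@app.route('/logout')
def logout():
    # Same shape as logout_handler above: HTML that closes the window,
    # plus expires=0 to delete the two cookies
    response = make_response(
        '<html><script>window.close();</script></html>', 200,
        {'Content-Type': 'text/html'})
    response.set_cookie('account', expires=0)  # hypothetical cookie names
    response.set_cookie('access', expires=0)
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response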
zimeon/iiif
iiif/auth_flask.py
IIIFAuthFlask.access_token_handler
def access_token_handler(self, **args):
    """Get access token based on cookie sent with this request.

    This handler deals with two cases:

    1) Non-browser client (indicated by no messageId set in request)
    where the response is a simple JSON response.

    2) Browser client (indicated by messageId set in request) where
    the request must be made from an iFrame and the response is sent
    as JSON wrapped in HTML containing a postMessage() script that
    conveys the access token to the viewer.
    """
    message_id = request.args.get('messageId', default=None)
    origin = request.args.get('origin', default='unknown_origin')
    self.logger.info("access_token_handler: origin = " + origin)
    account = request.cookies.get(self.account_cookie_name, default='')
    token = self.access_token(account)
    # Build JSON response
    data_str = json.dumps(self.access_token_response(token, message_id))
    ct = "application/json"
    # If message_id is set then wrap in HTML with postMessage JavaScript
    # for a browser client
    if (message_id is not None):
        data_str = """<html>
<body style="margin: 0px;">
<div>postMessage ACCESS TOKEN %s</div>
<script>
window.parent.postMessage(%s, '%s');
</script>
</body>
</html>
""" % (token, data_str, origin)
        ct = "text/html"
    # Send response along with cookie
    response = make_response(data_str, 200, {'Content-Type': ct})
    if (token):
        self.logger.info(
            "access_token_handler: setting access token = " + token)
        # Set the cookie for the image content
        cookie = self.access_cookie(token)
        self.logger.info(
            "access_token_handler: setting access cookie = " + cookie)
        response.set_cookie(self.access_cookie_name, cookie)
    else:
        self.logger.info(
            "access_token_handler: auth failed, sending error")
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
python
Get access token based on cookie sent with this request. This handler deals with two cases: 1) Non-browser client (indicated by no messageId set in request) where the response is a simple JSON response. 2) Browser client (indicated by messageId set in request) where the request must be made from an iFrame and the response is sent as JSON wrapped in HTML containing a postMessage() script that conveys the access token to the viewer.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_flask.py#L70-L121
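The two response shapes handled above (plain JSON for non-browser clients, postMessage HTML for iFrame clients) can be factored into a small helper. A sketch, assuming a token_response dict like the one access_token_response returns; the HTML is trimmed to the essential postMessage call:

import json

def wrap_token_response(token_response, message_id=None, origin='*'):
    """Return (body, content_type) for the two client cases."""
    data_str = json.dumps(token_response)
    if message_id is None:
        return data_str, 'application/json'  # non-browser client
    html = ("<html><body><script>"
            "window.parent.postMessage(%s, '%s');"
            "</script></body></html>") % (data_str, origin)
    return html, 'text/html'                 # browser/iFrame client

body, ct = wrap_token_response({'accessToken': 'tok'}, message_id='m1',
                               origin='https://viewer.example.org')
print(ct)  # text/html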
zimeon/iiif
iiif/auth_flask.py
IIIFAuthFlask.set_cookie_close_window_response
def set_cookie_close_window_response(self, account_cookie_value):
    """Response to set account cookie and close window HTML/JavaScript."""
    response = make_response(
        "<html><script>window.close();</script></html>", 200,
        {'Content-Type': "text/html"})
    response.set_cookie(self.account_cookie_name, account_cookie_value)
    return response
python
Response to set account cookie and close window HTML/JavaScript.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_flask.py#L123-L130
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.find_binaries
def find_binaries(cls, tmpdir=None, shellsetup=None, pnmdir=None):
    """Set instance variables for directory and binary locations.

    FIXME - should accept params to set things other than defaults.
    """
    cls.tmpdir = ('/tmp' if (tmpdir is None) else tmpdir)
    # Shell setup command (e.g. set library path)
    cls.shellsetup = ('' if (shellsetup is None) else shellsetup)
    if (pnmdir is None):
        cls.pnmdir = '/usr/bin'
        for dir in ('/usr/local/bin', '/sw/bin'):
            if (os.path.isfile(os.path.join(dir, 'pngtopnm'))):
                cls.pnmdir = dir
    else:
        cls.pnmdir = pnmdir
    # Recklessly assume everything else under cls.pnmdir
    cls.pngtopnm = os.path.join(cls.pnmdir, 'pngtopnm')
    cls.jpegtopnm = os.path.join(cls.pnmdir, 'jpegtopnm')
    cls.pnmfile = os.path.join(cls.pnmdir, 'pnmfile')
    cls.pnmcut = os.path.join(cls.pnmdir, 'pnmcut')
    cls.pnmscale = os.path.join(cls.pnmdir, 'pnmscale')
    cls.pnmrotate = os.path.join(cls.pnmdir, 'pnmrotate')
    cls.pnmflip = os.path.join(cls.pnmdir, 'pnmflip')
    cls.pnmtopng = os.path.join(cls.pnmdir, 'pnmtopng')
    cls.ppmtopgm = os.path.join(cls.pnmdir, 'ppmtopgm')
    cls.pnmtotiff = os.path.join(cls.pnmdir, 'pnmtotiff')
    cls.pnmtojpeg = os.path.join(cls.pnmdir, 'pnmtojpeg')
    cls.pamditherbw = os.path.join(cls.pnmdir, 'pamditherbw')
    # Need djatoka to get jp2 output
    cls.djatoka_comp = '/Users/simeon/packages/adore-djatoka-1.1/bin/compress.sh'
python
Set instance variables for directory and binary locations. FIXME - should accept params to set things other than defaults.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L41-L70
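find_binaries probes a couple of hard-coded directories for pngtopnm. A more portable alternative sketch using the standard library's shutil.which, which searches PATH; this is a suggestion, not what the library does:

import shutil

def find_netpbm(tools=('pngtopnm', 'jpegtopnm', 'pnmscale', 'pnmtopng')):
    """Map each Netpbm tool name to its absolute path, or None if absent."""
    return {tool: shutil.which(tool) for tool in tools}

print(find_netpbm())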
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.do_first
def do_first(self):
    """Create PNM file from input image file."""
    pid = os.getpid()
    self.basename = os.path.join(self.tmpdir, 'iiif_netpbm_' + str(pid))
    outfile = self.basename + '.pnm'
    # Convert source file to pnm
    filetype = self.file_type(self.srcfile)
    if (filetype == 'png'):
        if (self.shell_call(self.pngtopnm + ' ' + self.srcfile +
                            ' > ' + outfile)):
            raise IIIFError(text="Oops... got error from pngtopnm.")
    elif (filetype == 'jpg'):
        if (self.shell_call(self.jpegtopnm + ' ' + self.srcfile +
                            ' > ' + outfile)):
            raise IIIFError(text="Oops... got error from jpegtopnm.")
    else:
        raise IIIFError(code='501',
                        text='bad input file format (only know how to read png/jpeg)')
    self.tmpfile = outfile
    # Get size
    (self.width, self.height) = self.image_size(self.tmpfile)
python
Create PNM file from input image file.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L72-L90
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.do_region
def do_region(self, x, y, w, h):
    """Apply region selection."""
    infile = self.tmpfile
    outfile = self.basename + '.reg'
    # simeon@ice ~>cat m.pnm | pnmcut 10 10 100 200 > m1.pnm
    if (x is None):
        # print "region: full"
        self.tmpfile = infile
    else:
        # print "region: (%d,%d,%d,%d)" % (x,y,w,h)
        if (self.shell_call('cat ' + infile + ' | ' + self.pnmcut + ' ' +
                            str(x) + ' ' + str(y) + ' ' +
                            str(w) + ' ' + str(h) + ' > ' + outfile)):
            raise IIIFError(text="Oops... got nonzero output from pnmcut.")
        self.width = w
        self.height = h
        self.tmpfile = outfile
python
Apply region selection.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L92-L106
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.do_size
def do_size(self, w, h):
    """Apply size scaling."""
    # simeon@ice ~>cat m1.pnm | pnmscale -width 50 > m2.pnm
    infile = self.tmpfile
    outfile = self.basename + '.siz'
    if (w is None):
        # print "size: no scaling"
        self.tmpfile = infile
    else:
        # print "size: scaling to (%d,%d)" % (w,h)
        if (self.shell_call('cat ' + infile + ' | ' + self.pnmscale +
                            ' -width ' + str(w) + ' -height ' + str(h) +
                            ' > ' + outfile)):
            raise IIIFError(
                text="Oops... got nonzero output from pnmscale.")
        self.width = w
        self.height = h
        self.tmpfile = outfile
python
Apply size scaling.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L108-L123
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.do_rotation
def do_rotation(self, mirror, rot):
    """Apply rotation and/or mirroring."""
    infile = self.tmpfile
    outfile = self.basename + '.rot'
    # NOTE: pnmrotate: angle must be between -90 and 90, and the
    # rotation is CCW, not CW as in the IIIF spec
    #
    # BUG in pnmrotate: for +90 and -90 rotations the output image
    # size may be off. See for example a 1000x1000 image becoming
    # 1004x1000:
    #
    # simeon@RottenApple iiif>file testimages/67352ccc-d1b0-11e1-89ae-279075081939.png
    # testimages/67352ccc-d1b0-11e1-89ae-279075081939.png: PNG image data, 1000 x 1000, 8-bit/color RGB, non-interlaced
    # simeon@RottenApple iiif>cat testimages/67352ccc-d1b0-11e1-89ae-279075081939.png | pngtopnm | pnmrotate -90 | pnmtopng > a.png; file a.png; rm a.png
    # a.png: PNG image data, 1004 x 1000, 8-bit/color RGB, non-interlaced
    # simeon@RottenApple iiif>cat testimages/67352ccc-d1b0-11e1-89ae-279075081939.png | pngtopnm | pnmrotate 90 | pnmtopng > a.png; file a.png; rm a.png
    # a.png: PNG image data, 1004 x 1000, 8-bit/color RGB, non-interlaced
    #
    # WORKAROUND is to add a pnmscale for the 90 degree case:
    # simeon@RottenApple iiif>cat testimages/67352ccc-d1b0-11e1-89ae-279075081939.png | pngtopnm | pnmrotate -90| pnmscale -width 1000 -height 1000 | pnmtopng > a.png; file a.png; rm a.png
    # a.png: PNG image data, 1000 x 1000, 8-bit/color RGB, non-interlaced
    #
    # FIXME - add mirroring
    if (rot == 0.0):
        # print "rotation: no rotation"
        self.tmpfile = infile
    elif (rot <= 90.0 or rot >= 270.0):
        if (rot >= 270.0):
            rot -= 360.0
        # print "rotation: by %f degrees clockwise" % (rot)
        if (self.shell_call('cat ' + infile + ' | ' + self.pnmrotate +
                            ' -background=#FFF ' + str(-rot) +
                            ' > ' + outfile)):
            raise IIIFError(
                text="Oops... got nonzero output from pnmrotate.")
        self.tmpfile = outfile
    else:
        # Between 90 and 270 = flip and then -90 to 90
        rot -= 180.0
        # print "rotation: by %f degrees clockwise" % (rot)
        if (self.shell_call('cat ' + infile + ' | ' + self.pnmflip +
                            ' -rotate180 | ' + self.pnmrotate + ' ' +
                            str(-rot) + ' > ' + outfile)):
            raise IIIFError(
                text="Oops... got nonzero output from pnmrotate.")
        self.tmpfile = outfile
    # Fixup size for 90s
    if (abs(rot % 180.0 - 90.0) < 0.001):
        outfile2 = self.basename + '.rot2'
        if (self.shell_call('cat ' + self.tmpfile + ' | ' + self.pnmscale +
                            ' -width ' + str(self.height) + ' -height ' +
                            str(self.width) + ' > ' + outfile2)):
            raise IIIFError(
                text="Oops... failed to fixup size after pnmrotate.")
        self.tmpfile = outfile2
python
Apply rotation and/or mirroring.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L125-L174
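The angle arithmetic above is easy to get wrong: IIIF rotation is clockwise while pnmrotate takes -90..90 counter-clockwise, so 90..270 is handled as a 180 degree flip plus a residual rotation. A standalone sketch of just that mapping (function name hypothetical):

def pnmrotate_plan(rot):
    """Return (flip180, pnmrotate_angle) for an IIIF clockwise rotation."""
    if rot == 0.0:
        return (False, 0.0)
    if rot <= 90.0 or rot >= 270.0:
        if rot >= 270.0:
            rot -= 360.0           # e.g. 270 -> -90
        return (False, -rot)       # negate: CW (IIIF) -> CCW (pnmrotate)
    return (True, -(rot - 180.0))  # flip 180, then rotate the remainder

assert pnmrotate_plan(45.0) == (False, -45.0)
assert pnmrotate_plan(270.0) == (False, 90.0)
assert pnmrotate_plan(180.0) == (True, 0.0)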
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.do_quality
def do_quality(self, quality):
    """Apply value of quality parameter."""
    infile = self.tmpfile
    outfile = self.basename + '.col'
    # Quality (bit-depth):
    if (quality == 'grey' or quality == 'gray'):
        if (self.shell_call('cat ' + infile + ' | ' + self.ppmtopgm +
                            ' > ' + outfile)):
            raise IIIFError(
                text="Oops... got nonzero output from ppmtopgm.")
        self.tmpfile = outfile
    elif (quality == 'bitonal'):
        if (self.shell_call('cat ' + infile + ' | ' + self.ppmtopgm +
                            ' | ' + self.pamditherbw + ' > ' + outfile)):
            raise IIIFError(
                text="Oops... got nonzero output from ppmtopgm|pamditherbw.")
        self.tmpfile = outfile
    elif ((quality == 'native' and self.api_version < '2.0') or
          (quality == 'default' and self.api_version >= '2.0') or
          quality == 'color'):
        self.tmpfile = infile
    else:
        raise IIIFError(code=400, parameter='quality',
                        text="Unknown quality parameter value requested.")
python
Apply value of quality parameter.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L176-L197
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.do_format
def do_format(self, format):
    """Apply format selection."""
    infile = self.tmpfile
    outfile = self.basename + '.out'
    outfile_jp2 = self.basename + '.jp2'
    # Now convert finished pnm file to output format
    # simeon@ice ~>cat m3.pnm | pnmtojpeg > m4.jpg
    # simeon@ice ~>cat m3.pnm | pnmtotiff > m4.jpg
    # pnmtotiff: computing colormap...
    # pnmtotiff: Too many colors - proceeding to write a 24-bit RGB file.
    # pnmtotiff: If you want an 8-bit palette file, try doing a 'ppmquant 256'.
    # simeon@ice ~>cat m3.pnm | pnmtopng > m4.png
    fmt = ('png' if (format is None) else format)
    if (fmt == 'png'):
        # print "format: png"
        if (self.shell_call(self.pnmtopng + ' ' + infile +
                            ' > ' + outfile)):
            raise IIIFError(
                text="Oops... got nonzero output from pnmtopng.")
        mime_type = "image/png"
    elif (fmt == 'jpg'):
        # print "format: jpg"
        if (self.shell_call(self.pnmtojpeg + ' ' + infile +
                            ' > ' + outfile)):
            raise IIIFError(
                text="Oops... got nonzero output from pnmtojpeg.")
        mime_type = "image/jpeg"
    elif (fmt == 'tiff' or fmt == 'jp2'):
        # print "format: tiff/jp2"
        if (self.shell_call(self.pnmtotiff + ' ' + infile +
                            ' > ' + outfile)):
            raise IIIFError(
                text="Oops... got nonzero output from pnmtotiff.")
        mime_type = "image/tiff"
        if (fmt == 'jp2'):
            # use djatoka after tiff
            if (self.shell_call(self.djatoka_comp + ' -i ' + outfile +
                                ' -o ' + outfile_jp2)):
                raise IIIFError(
                    text="Oops... got nonzero output from djatoka compress.")
            mime_type = "image/jp2"
            outfile = outfile_jp2
    else:
        raise IIIFError(code=415, parameter='format',
                        text="Unsupported output file format (%s), only png,jpg,tiff,jp2 are supported." % (fmt))
    self.outfile = outfile
    self.output_format = fmt
    self.mime_type = mime_type
python
Apply format selection.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L199-L242
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.file_type
def file_type(self, file):
    """Use python-magic to determine file type.

    Returns 'png' or 'jpg' on success, nothing on failure.
    """
    try:
        magic_text = magic.from_file(file)
        if (isinstance(magic_text, bytes)):
            # In python2 and travis python3 (?!) decode to get unicode string
            magic_text = magic_text.decode('utf-8')
    except (TypeError, IOError):
        return
    if (re.search('PNG image data', magic_text)):
        return('png')
    elif (re.search('JPEG image data', magic_text)):
        return('jpg')
    # failed
    return
python
Use python-magic to determine file type. Returns 'png' or 'jpg' on success, nothing on failure.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L244-L261
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.image_size
def image_size(self, pnmfile):
    """Get width and height of pnm file.

    simeon@homebox src>pnmfile /tmp/214-2.png
    /tmp/214-2.png:PPM raw, 100 by 100 maxval 255
    """
    pout = os.popen(self.shellsetup + self.pnmfile + ' ' + pnmfile, 'r')
    pnmfileout = pout.read(200)
    pout.close()
    m = re.search(r', (\d+) by (\d+) ', pnmfileout)
    if (m is None):
        raise IIIFError(
            text="Bad output from pnmfile when trying to get size.")
    w = int(m.group(1))
    h = int(m.group(2))
    # print "pnmfile output = %s" % (pnmfileout)
    # print "image size = %d,%d" % (w,h)
    return(w, h)
python
Get width and height of pnm file. simeon@homebox src>pnmfile /tmp/214-2.png /tmp/214-2.png:PPM raw, 100 by 100 maxval 255
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L263-L280
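The regex parse of pnmfile output can be exercised directly against the sample line quoted in the docstring. A standalone sketch (helper name hypothetical):

import re

def parse_pnmfile_size(output):
    """Extract (width, height) from pnmfile output."""
    m = re.search(r', (\d+) by (\d+) ', output)
    if m is None:
        raise ValueError('Bad output from pnmfile when trying to get size.')
    return (int(m.group(1)), int(m.group(2)))

sample = '/tmp/214-2.png:PPM raw, 100 by 100 maxval 255'
assert parse_pnmfile_size(sample) == (100, 100)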
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.shell_call
def shell_call(self, shellcmd):
    """Shell call with necessary setup first."""
    return(subprocess.call(self.shellsetup + shellcmd, shell=True))
python
Shell call with necessary setup first.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L282-L284
zimeon/iiif
iiif/manipulator_netpbm.py
IIIFManipulatorNetpbm.cleanup
def cleanup(self):
    """Clean up any temporary files."""
    for file in glob.glob(self.basename + '*'):
        os.unlink(file)
python
Clean up any temporary files.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator_netpbm.py#L286-L289
zimeon/iiif
iiif/error.py
IIIFError.image_server_response
def image_server_response(self, api_version=None):
    """Response, code and headers for image server error response.

    api_version selects the format (XML for 1.0).

    The return value is a tuple of
      response - body of HTTP response
      status - the HTTP status code
      headers - a dict of HTTP headers which will include the Content-Type

    As a side effect the routine sets self.content_type to the
    correct media type for the response.
    """
    headers = dict(self.headers)
    if (api_version < '1.1'):
        headers['Content-Type'] = 'text/xml'
        response = self.as_xml()
    else:
        headers['Content-Type'] = 'text/plain'
        response = self.as_txt()
    return(response, self.code, headers)
python
Response, code and headers for image server error response. api_version selects the format (XML for 1.0). The return value is a tuple of response - body of HTTP response status - the HTTP status code headers - a dict of HTTP headers which will include the Content-Type As a side effect the routine sets self.content_type to the correct media type for the response.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/error.py#L50-L69
zimeon/iiif
iiif/error.py
IIIFError.as_xml
def as_xml(self):
    """XML representation of the error to be used in HTTP response.

    This XML format follows the IIIF Image API v1.0 specification,
    see <http://iiif.io/api/image/1.0/#error>
    """
    # Build tree
    spacing = ("\n" if (self.pretty_xml) else "")
    root = Element('error', {'xmlns': I3F_NS})
    root.text = spacing
    e_parameter = Element('parameter', {})
    e_parameter.text = self.parameter
    e_parameter.tail = spacing
    root.append(e_parameter)
    if (self.text):
        e_text = Element('text', {})
        e_text.text = self.text
        e_text.tail = spacing
        root.append(e_text)
    # Write out as XML document to return
    tree = ElementTree(root)
    xml_buf = io.BytesIO()
    if (sys.version_info < (2, 7)):
        tree.write(xml_buf, encoding='UTF-8')
    else:
        tree.write(xml_buf, encoding='UTF-8',
                   xml_declaration=True, method='xml')
    return(xml_buf.getvalue().decode('utf-8'))
python
XML representation of the error to be used in HTTP response. This XML format follows the IIIF Image API v1.0 specification, see <http://iiif.io/api/image/1.0/#error>
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/error.py#L71-L98
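The XML shape as_xml produces can be previewed with the same ElementTree calls. A sketch with a placeholder namespace URI standing in for I3F_NS (an assumption, since the constant's value is not shown in this row):

import io
from xml.etree.ElementTree import Element, ElementTree

root = Element('error', {'xmlns': 'http://example.org/ns-standing-in-for-I3F_NS'})
e_parameter = Element('parameter')
e_parameter.text = 'size'
root.append(e_parameter)

buf = io.BytesIO()
ElementTree(root).write(buf, encoding='UTF-8', xml_declaration=True, method='xml')
print(buf.getvalue().decode('utf-8'))
# <?xml version='1.0' encoding='UTF-8'?><error xmlns="..."><parameter>size</parameter></error>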
zimeon/iiif
iiif/error.py
IIIFError.as_txt
def as_txt(self):
    """Text rendering of error response.

    Designed for use with Image API version 1.1 and above where the
    error response is suggested to be text or html but not otherwise
    specified. Intended to provide useful information for debugging.
    """
    s = "IIIF Image Server Error\n\n"
    s += self.text if (self.text) else 'UNKNOWN_ERROR'
    s += "\n\n"
    if (self.parameter):
        s += "parameter=%s\n" % self.parameter
    if (self.code):
        s += "code=%d\n\n" % self.code
    for header in sorted(self.headers):
        s += "header %s=%s\n" % (header, self.headers[header])
    return s
python
Text rendering of error response. Designed for use with Image API version 1.1 and above where the error response is suggested to be text or html but not otherwise specified. Intended to provide useful information for debugging.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/error.py#L100-L116
zimeon/iiif
iiif/auth_google.py
IIIFAuthGoogle.login_handler
def login_handler(self, config=None, prefix=None, **args):
    """OAuth starts here, redirect user to Google."""
    params = {
        'response_type': 'code',
        'client_id': self.google_api_client_id,
        'redirect_uri': self.scheme_host_port_prefix(
            'http', config.host, config.port, prefix) + '/home',
        'scope': self.google_api_scope,
        'state': self.request_args_get('next', default=''),
    }
    url = self.google_oauth2_url + 'auth?' + urlencode(params)
    return self.login_handler_redirect(url)
python
OAuth starts here, redirect user to Google.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_google.py#L53-L64
zimeon/iiif
iiif/auth_google.py
IIIFAuthGoogle.home_handler
def home_handler(self, config=None, prefix=None, **args):
    """Handler for /home redirect path after Google auth.

    OAuth ends up back here from Google. Set the account cookie
    and close window to trigger next step.
    """
    gresponse = self.google_get_token(config, prefix)
    gdata = self.google_get_data(config, gresponse)
    email = gdata.get('email', 'NO_EMAIL')
    name = gdata.get('name', 'NO_NAME')
    # Make and store cookie from identity, set and close window
    cookie = self.access_cookie(name + ' ' + email)
    return self.set_cookie_close_window_response(cookie)
python
Handler for /home redirect path after Google auth. OAuth ends up back here from Google. Set the account cookie and close window to trigger next step.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_google.py#L66-L78
zimeon/iiif
iiif/auth_google.py
IIIFAuthGoogle.google_get_token
def google_get_token(self, config, prefix):
    """Make request to Google API to get token."""
    params = {
        'code': self.request_args_get('code', default=''),
        'client_id': self.google_api_client_id,
        'client_secret': self.google_api_client_secret,
        'redirect_uri': self.scheme_host_port_prefix(
            'http', config.host, config.port, prefix) + '/home',
        'grant_type': 'authorization_code',
    }
    payload = urlencode(params).encode('utf-8')
    url = self.google_oauth2_url + 'token'
    req = Request(url, payload)
    json_str = urlopen(req).read()
    return json.loads(json_str.decode('utf-8'))
python
Make request to Google API to get token.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_google.py#L84-L100
zimeon/iiif
iiif/auth_google.py
IIIFAuthGoogle.google_get_data
def google_get_data(self, config, response):
    """Make request to Google API to get profile data for the user."""
    params = {
        'access_token': response['access_token'],
    }
    payload = urlencode(params)
    url = self.google_api_url + 'userinfo?' + payload
    req = Request(url)
    json_str = urlopen(req).read()
    return json.loads(json_str.decode('utf-8'))
python
Make request to Google API to get profile data for the user.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/auth_google.py#L102-L111
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.compliance_uri
def compliance_uri(self):
    """Compliance URI based on api_version.

    Value is based on api_version and compliance_level, will be None
    if either are unset/unrecognized. The assumption here is that the
    api_version and level are orthogonal, override this method if
    that isn't true.
    """
    if (self.api_version == '1.0'):
        uri_pattern = r'http://library.stanford.edu/iiif/image-api/compliance.html#level%d'
    elif (self.api_version == '1.1'):
        uri_pattern = r'http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level%d'
    elif (self.api_version == '2.0' or self.api_version == '2.1'):
        uri_pattern = r'http://iiif.io/api/image/2/level%d.json'
    else:
        return
    if (self.compliance_level is None):
        return
    return(uri_pattern % self.compliance_level)
python
Compliance URI based on api_version. Value is based on api_version and compliance_level, will be None if either are unset/unrecognized. The assumption here is that the api_version and level are orthogonal, override this method if that isn't true.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L49-L68
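The version-to-URI mapping above is a pure function of api_version and compliance level, so it can be table-driven. A sketch with the pattern strings copied from the row; the level value in the usage line is just an example:

COMPLIANCE_PATTERNS = {
    '1.0': 'http://library.stanford.edu/iiif/image-api/compliance.html#level%d',
    '1.1': 'http://library.stanford.edu/iiif/image-api/1.1/compliance.html#level%d',
    '2.0': 'http://iiif.io/api/image/2/level%d.json',
    '2.1': 'http://iiif.io/api/image/2/level%d.json',
}

def compliance_uri(api_version, compliance_level):
    pattern = COMPLIANCE_PATTERNS.get(api_version)
    if pattern is None or compliance_level is None:
        return None
    return pattern % compliance_level

print(compliance_uri('2.0', 1))  # http://iiif.io/api/image/2/level1.json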
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.derive
def derive(self, srcfile=None, request=None, outfile=None):
    """Do sequence of manipulations for IIIF to derive output image.

    Named arguments:
    srcfile -- source image file
    request -- IIIFRequest object with parsed parameters
    outfile -- output image file. If set then the output will be
               written to that file, otherwise a new temporary file
               will be created and outfile set to its location.

    See order in spec: http://www-sul.stanford.edu/iiif/image-api/#order
      Region THEN Size THEN Rotation THEN Quality THEN Format

    Typical use:

      r = IIIFRequest(region=...)
      m = IIIFManipulator()
      try:
          m.derive(srcfile='a.jpg', request=r)
          # .. serve m.outfile
      except IIIFError as e:
          # ..
      finally:
          m.cleanup()  # removes temp m.outfile
    """
    # set if specified
    if (srcfile is not None):
        self.srcfile = srcfile
    if (request is not None):
        self.request = request
    if (outfile is not None):
        self.outfile = outfile
    if (self.outfile is not None):
        # create path to output dir if necessary
        dir = os.path.dirname(self.outfile)
        if (not os.path.exists(dir)):
            os.makedirs(dir)
    #
    self.do_first()
    (x, y, w, h) = self.region_to_apply()
    self.do_region(x, y, w, h)
    (w, h) = self.size_to_apply()
    self.do_size(w, h)
    (mirror, rot) = self.rotation_to_apply(no_mirror=True)
    self.do_rotation(mirror, rot)
    (quality) = self.quality_to_apply()
    self.do_quality(quality)
    self.do_format(self.request.format)
    self.do_last()
    return(self.outfile, self.mime_type)
python
Do sequence of manipulations for IIIF to derive output image. Named arguments: srcfile -- source image file request -- IIIFRequest object with parsed parameters outfile -- output image file. If set then the output will be written to that file, otherwise a new temporary file will be created and outfile set to its location. See order in spec: http://www-sul.stanford.edu/iiif/image-api/#order Region THEN Size THEN Rotation THEN Quality THEN Format Typical use: r = IIIFRequest(region=...) m = IIIFManipulator() try: m.derive(srcfile='a.jpg',request=r) # .. serve m.outfile except IIIFError as e: # .. finally: m.cleanup() #removes temp m.outfile
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L70-L120
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.do_region
def do_region(self, x, y, w, h):
    """Null implementation of region selection."""
    if (x is not None):
        raise IIIFError(code=501, parameter="region",
                        text="Null manipulator supports only region=/full/.")
python
Null implementation of region selection.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L130-L134
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.do_rotation
def do_rotation(self, mirror, rot):
    """Null implementation of rotate and/or mirror."""
    if (mirror):
        raise IIIFError(code=501, parameter="rotation",
                        text="Null manipulator does not support mirroring.")
    if (rot != 0.0):
        raise IIIFError(code=501, parameter="rotation",
                        text="Null manipulator supports only rotation=(0|360).")
python
Null implementation of rotate and/or mirror.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L142-L149
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.do_quality
def do_quality(self, quality):
    """Null implementation of quality."""
    if (self.api_version >= '2.0'):
        if (quality != "default"):
            raise IIIFError(code=501, parameter="quality",
                            text="Null manipulator supports only quality=default.")
    else:  # versions 1.0 and 1.1
        if (quality != "native"):
            raise IIIFError(code=501, parameter="quality",
                            text="Null manipulator supports only quality=native.")
python
Null implementation of quality.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L151-L160
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.do_format
def do_format(self, format):
    """Null implementation of format selection.

    This is the last step, this null implementation does not accept
    any specification of a format because we don't even know what
    the input format is.
    """
    if (format is not None):
        raise IIIFError(code=415, parameter="format",
                        text="Null manipulator does not support specification of output format.")
    #
    if (self.outfile is None):
        self.outfile = self.srcfile
    else:
        try:
            shutil.copyfile(self.srcfile, self.outfile)
        except IOError as e:
            raise IIIFError(code=500,
                            text="Failed to copy file (%s)." % (str(e)))
    self.mime_type = None
python
Null implementation of format selection. This is the last step, this null implementation does not accept any specification of a format because we don't even know what the input format is.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L162-L181
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.region_to_apply
def region_to_apply(self):
    """Return the x,y,w,h parameters to extract given image width and height.

    Assume image width and height are available in self.width and
    self.height, and self.request is an IIIFRequest object.

    Expected use:
      (x,y,w,h) = self.region_to_apply()
      if (x is None):
          # full image
      else:
          # extract

    Returns (None,None,None,None) if no extraction is required.
    """
    if (self.request.region_full or
            (self.request.region_pct and
             self.request.region_xywh == (0, 0, 100, 100))):
        return(None, None, None, None)
    # Cannot do anything else unless we know size (in self.width and
    # self.height)
    if (self.width <= 0 or self.height <= 0):
        raise IIIFError(code=501, parameter='region',
                        text="Region parameters require knowledge of image size which is not implemented.")
    if (self.request.region_square):
        # Integer division so the offset stays an int under Python 3
        if (self.width <= self.height):
            y_offset = (self.height - self.width) // 2
            return(0, y_offset, self.width, self.width)
        else:  # self.width > self.height
            x_offset = (self.width - self.height) // 2
            return(x_offset, 0, self.height, self.height)
    # pct or explicit pixel sizes
    pct = self.request.region_pct
    (x, y, w, h) = self.request.region_xywh
    # Convert pct to pixels based on actual size
    if (pct):
        x = int((x / 100.0) * self.width + 0.5)
        y = int((y / 100.0) * self.height + 0.5)
        w = int((w / 100.0) * self.width + 0.5)
        h = int((h / 100.0) * self.height + 0.5)
    # Check if boundary extends beyond image and truncate
    if ((x + w) > self.width):
        w = self.width - x
    if ((y + h) > self.height):
        h = self.height - y
    # Final check to see if we have the whole image
    if (w == 0 or h == 0):
        raise IIIFZeroSizeError(code=400, parameter='region',
                                text="Region parameters would result in zero size result image.")
    if (x == 0 and y == 0 and w == self.width and h == self.height):
        return(None, None, None, None)
    return(x, y, w, h)
python
Return the x,y,w,h parameters to extract given image width and height. Assume image width and height are available in self.width and self.height, and self.request is an IIIFRequest object Expected use: (x,y,w,h) = self.region_to_apply() if (x is None): # full image else: # extract Returns (None,None,None,None) if no extraction is required.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L192-L243
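A worked example of the pct-to-pixel conversion above, assuming a hypothetical 600x400 source image and a pct:10,10,50,50 region request:

width, height = 600, 400
x, y, w, h = 10, 10, 50, 50  # percentages from the request

x = int((x / 100.0) * width + 0.5)   # 60
y = int((y / 100.0) * height + 0.5)  # 40
w = int((w / 100.0) * width + 0.5)   # 300
h = int((h / 100.0) * height + 0.5)  # 200

print((x, y, w, h))  # (60, 40, 300, 200) -- within bounds, no truncation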
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.size_to_apply
def size_to_apply(self):
    """Calculate size of image scaled using size parameters.

    Assumes current image width and height are available in self.width
    and self.height, and self.request is an IIIFRequest object.

    Formats are: w, ,h w,h pct:p !w,h full max

    Returns (None,None) if no scaling is required.

    If max is requested and neither max_area nor max_width are specified
    then this is the same as full. Otherwise the limits are used to
    determine the size.
    """
    if (self.request.size_full or self.request.size_pct == 100.0):
        # full size
        return(None, None)
    # Not trivially full size, look at possibilities in turn
    w = self.width
    h = self.height
    if (self.request.size_max):
        # use size limits if present, else full
        if (self.max_area and self.max_area < (w * h)):
            scale = (float(self.max_area) / float(w * h)) ** 0.5
            w = int(w * scale + 0.5)
            h = int(h * scale + 0.5)
        if (self.max_width):
            max_height = self.max_height if self.max_height is not None else self.max_width
            if (self.max_width < w):
                # calculate wrt original width, height rather than
                # w, h to avoid compounding rounding issues
                scale = float(self.max_width) / float(self.width)
                w = int(self.width * scale + 0.5)
                h = int(self.height * scale + 0.5)
            if (max_height < h):
                scale = float(max_height) / float(self.height)
                w = int(self.width * scale + 0.5)
                h = int(self.height * scale + 0.5)
    elif (self.request.size_pct is not None):
        w = int(self.width * self.request.size_pct / 100.0 + 0.5)
        h = int(self.height * self.request.size_pct / 100.0 + 0.5)
    elif (self.request.size_bang):
        # Have "!w,h" form
        (mw, mh) = self.request.size_wh
        # Pick smaller fraction and then work from that...
        frac = min((float(mw) / float(self.width)),
                   (float(mh) / float(self.height)))
        w = int(self.width * frac + 0.5)
        h = int(self.height * frac + 0.5)
    else:
        # Must now be "w,h", "w," or ",h". If both are specified then
        # this will be the size, otherwise find the other to keep the
        # aspect ratio
        (w, h) = self.request.size_wh
        if (w is None):
            w = int(self.width * h / self.height + 0.5)
        elif (h is None):
            h = int(self.height * w / self.width + 0.5)
    # Now have w,h, sanity check and return
    if (w == 0 or h == 0):
        raise IIIFZeroSizeError(
            code=400, parameter='size',
            text="Size parameter would result in zero size result image (%d,%d)." % (w, h))
    # Below would be test for scaling up image size, this is allowed by spec
    # if (w > self.width or h > self.height):
    #     raise IIIFError(code=400, parameter='size',
    #                     text="Size requests scaling up image to larger than original.")
    if (w == self.width and h == self.height):
        return(None, None)
    return(w, h)
python
Calculate size of image scaled using size parameters. Assumes current image width and height are available in self.width and self.height, and self.request is an IIIFRequest object. Formats are: w, ,h w,h pct:p !w,h full max. Returns (None,None) if no scaling is required. If max is requested and neither max_area nor max_width is specified then this is the same as full. Otherwise the limits are used to determine the size.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L245-L313
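For illustration, a hedged sketch of the pct: branch; the request attributes are set explicitly by hand (an assumption about how IIIFRequest can be driven) so the example does not depend on its parsing defaults.

# Hypothetical example: a pct:50 size request against a 1000x800 image.
from iiif.manipulator import IIIFManipulator
from iiif.request import IIIFRequest

m = IIIFManipulator()
m.width, m.height = 1000, 800
m.request = IIIFRequest(identifier='example')
m.request.size_full = False
m.request.size_max = False
m.request.size_pct = 50.0
print(m.size_to_apply())   # -> (500, 400)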
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.rotation_to_apply
def rotation_to_apply(self, only90s=False, no_mirror=False):
    """Check and interpret rotation.

    Returns a truth value as to whether to mirror, and a floating
    point number 0 <= angle < 360 (degrees).
    """
    rotation = self.request.rotation_deg
    if (no_mirror and self.request.rotation_mirror):
        raise IIIFError(code=501, parameter="rotation",
                        text="This implementation does not support mirroring.")
    if (only90s and (rotation != 0.0 and rotation != 90.0 and
                     rotation != 180.0 and rotation != 270.0)):
        raise IIIFError(code=501, parameter="rotation",
                        text="This implementation supports only 0,90,180,270 degree rotations.")
    return(self.request.rotation_mirror, rotation)
python
Check and interpret rotation. Returns a truth value as to whether to mirror, and a floating point number 0 <= angle < 360 (degrees).
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L315-L329
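A small hedged sketch of the right-angle-only path; as above, the request attributes are assigned directly for illustration.

# Hypothetical example: a 90-degree rotation on a manipulator that only
# supports right-angle rotations and no mirroring.
from iiif.manipulator import IIIFManipulator
from iiif.request import IIIFRequest

m = IIIFManipulator()
m.request = IIIFRequest(identifier='example')
m.request.rotation_deg = 90.0
m.request.rotation_mirror = False
print(m.rotation_to_apply(only90s=True, no_mirror=True))  # -> (False, 90.0)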
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.quality_to_apply
def quality_to_apply(self):
    """Value of quality parameter to use in processing request.

    Simple substitution of 'native' or 'default' if no quality
    parameter is specified.
    """
    if (self.request.quality is None):
        if (self.api_version <= '1.1'):
            return('native')
        else:
            return('default')
    return(self.request.quality)
python
Value of quality parameter to use in processing request. Simple substitution of 'native' or 'default' if no quality parameter is specified.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L331-L342
zimeon/iiif
iiif/manipulator.py
IIIFManipulator.scale_factors
def scale_factors(self, tile_width, tile_height=None):
    """Return a set of scale factors for given tile and window size.

    Gives a set of scale factors, starting at 1, and in multiples of 2.
    Largest scale_factor is so that one tile will cover the entire image
    (self.width,self.height). If tile_height is not specified then tiles
    are assumed to be squares of tile_width pixels.
    """
    if (not tile_height):
        tile_height = tile_width
    sf = 1
    scale_factors = [sf]
    for j in range(30):  # limit of 2^30, should be enough!
        sf = 2 * sf
        if (tile_width * sf > self.width and
                tile_height * sf > self.height):
            break
        scale_factors.append(sf)
    return scale_factors
python
Return a set of scale factors for given tile and window size. Gives a set of scale factors, starting at 1, and in multiples of 2. Largest scale_factor is so that one tile will cover the entire image (self.width,self.height). If tile_height is not specified then tiles are assumed to be squares of tile_width pixels.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/manipulator.py#L353-L373
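A quick worked example (made-up sizes): doubling stops once a single tile at the next factor would exceed both image dimensions.

# Hypothetical example: 512px tiles over a 5000x4000 image.
from iiif.manipulator import IIIFManipulator

m = IIIFManipulator()
m.width, m.height = 5000, 4000
print(m.scale_factors(512))   # -> [1, 2, 4, 8]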
zimeon/iiif
iiif/generators/diagonal_cross.py
PixelGen.pixel
def pixel(self, x, y, size=None):
    """Return color for a pixel."""
    if (size is None):
        size = self.sz
    # Have we got to the smallest element?
    if (size <= 3):
        if (_not_diagonal(x, y)):
            return None
        else:
            return (0, 0, 0)
    divisor = size // 3
    if (_not_diagonal(x // divisor, y // divisor)):
        return None
    return self.pixel(x % divisor, y % divisor, divisor)
python
Return color for a pixel.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/generators/diagonal_cross.py#L29-L42
zimeon/iiif
iiif/generators/mandlebrot_100k.py
PixelGen.color
def color(self, n):
    """Color of pixel that reached limit after n iterations.

    Returns a color tuple for use with PIL, tending toward red as we
    tend toward self.max_iter iterations.
    """
    red = int(n * self.shade_factor)
    if (red > 255):
        red = 255
    return (red, 50, 100)
python
Color of pixel that reached limit after n iterations. Returns a color tuple for use with PIL, tending toward red as we tend toward self.max_iter iterations.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/generators/mandlebrot_100k.py#L44-L53
zimeon/iiif
iiif/generators/mandlebrot_100k.py
PixelGen.mpixel
def mpixel(self, z, n=0):
    """Iteration in Mandlebrot coordinate z."""
    z = z * z + self.c
    if (abs(z) > 2.0):
        return self.color(n)
    n += 1
    if (n > self.max_iter):
        return None
    return self.mpixel(z, n)
python
Iteration in Mandlebrot coordinate z.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/generators/mandlebrot_100k.py#L55-L63
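Since mpixel() recurses once per iteration, very large max_iter values can exceed Python's default recursion limit; an equivalent iterative form of the same escape-time test is sketched below (standalone, with made-up parameters).

def escape_iterations(c, max_iter=100):
    """Iterative form of z <- z*z + c; returns the escape count or None."""
    z = 0j
    for n in range(max_iter):
        z = z * z + c
        if abs(z) > 2.0:
            return n        # escaped: analogous to returning color(n)
    return None             # assumed bounded: analogous to returning None

print(escape_iterations(complex(1.0, 1.0)))    # escapes quickly -> 1
print(escape_iterations(complex(-0.5, 0.5)))   # stays bounded -> None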
zimeon/iiif
iiif/generators/mandlebrot_100k.py
PixelGen.pixel
def pixel(self, ix, iy):
    """Return color for a pixel.

    Does translation from image coordinates (ix,iy) into the complex
    plane coordinate z = x+yi, and then calls self.mpixel(z) to find
    the color at point z.
    """
    x = (ix - self.xoffset + 0.5) / self.scale
    y = (iy - self.yoffset + 0.5) / self.scale
    z = complex(x, y)
    self.set_c(z)
    return self.mpixel(z)
python
Return color for a pixel. Does translation from image coordinates (ix,iy) into the complex plane coordinate z = x+yi, and then calls self.mpixel(z) to find the color at point z.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/generators/mandlebrot_100k.py#L65-L76
zimeon/iiif
iiif/static.py
static_partial_tile_sizes
def static_partial_tile_sizes(width, height, tilesize, scale_factors):
    """Generator for partial tile sizes for zoomed in views.

    Positional arguments:
    width -- width of full size image
    height -- height of full size image
    tilesize -- width and height of tiles
    scale_factors -- iterable of scale factors, typically [1,2,4..]

    Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
    """
    for sf in scale_factors:
        if (sf * tilesize >= width and sf * tilesize >= height):
            continue  # avoid any full-region tiles
        rts = tilesize * sf  # tile size in original region
        xt = (width - 1) // rts + 1
        yt = (height - 1) // rts + 1
        for nx in range(xt):
            rx = nx * rts
            rxe = rx + rts
            if (rxe > width):
                rxe = width
            rw = rxe - rx
            # same as sw = int(math.ceil(rw/float(sf)))
            sw = (rw + sf - 1) // sf
            for ny in range(yt):
                ry = ny * rts
                rye = ry + rts
                if (rye > height):
                    rye = height
                rh = rye - ry
                # same as sh = int(math.ceil(rh/float(sf)))
                sh = (rh + sf - 1) // sf
                yield([rx, ry, rw, rh], [sw, sh])
python
Generator for partial tile sizes for zoomed in views. Positional arguments: width -- width of full size image height -- height of full size image tilesize -- width and height of tiles scale_factors -- iterable of scale factors, typically [1,2,4..] Yields ([rx,ry,rw,rh],[sw,sh]), the region and size for each tile
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L21-L54
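A worked example with made-up sizes: scale factor 2 is skipped because a single 1024px tile would cover the whole image.

# Hypothetical example: tiles for a 1024x768 image with 512px tiles.
from iiif.static import static_partial_tile_sizes

for region, size in static_partial_tile_sizes(1024, 768, 512, [1, 2]):
    print(region, size)
# [0, 0, 512, 512] [512, 512]
# [0, 512, 512, 256] [512, 256]
# [512, 0, 512, 512] [512, 512]
# [512, 512, 512, 256] [512, 256]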
zimeon/iiif
iiif/static.py
static_full_sizes
def static_full_sizes(width, height, tilesize):
    """Generator for scaled-down full image sizes.

    Positional arguments:
    width -- width of full size image
    height -- height of full size image
    tilesize -- width and height of tiles

    Yields [sw,sh], the size for each full-region tile that is less
    than the tilesize. This includes tiles up to the full image size
    if that is smaller than the tilesize.
    """
    # FIXME - Not sure what the correct algorithm is for this. From
    # observation of OpenSeadragon it seems that one keeps halving
    # the pixel size of the full image until both width and height
    # are less than the tile size. After that all subsequent halvings
    # of the image size are used, all the way down to 1,1. It seems
    # that without these reduced size full-region images, OpenSeadragon
    # will not display any unzoomed image in small windows.
    #
    # I do not understand the algorithm that OpenSeadragon uses (or
    # know where it is in the code) to decide how small a version of
    # the complete image to request. It seems that there is a bug in
    # OpenSeadragon here because in some cases it requests images
    # of size 1,1 multiple times, which is anyway a useless image.
    for level in range(0, 20):
        factor = 2.0 ** level
        sw = int(width / factor + 0.5)
        sh = int(height / factor + 0.5)
        if (sw < tilesize and sh < tilesize):
            if (sw < 1 or sh < 1):
                break
            yield([sw, sh])
python
Generator for scaled-down full image sizes. Positional arguments: width -- width of full size image height -- height of full size image tilesize -- width and height of tiles Yields [sw,sh], the size for each full-region tile that is less than the tilesize. This includes tiles up to the full image size if that is smaller than the tilesize.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L57-L89
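Another worked example with made-up sizes: halving continues from the first size below the tile size all the way down to (but not below) 1x1.

# Hypothetical example: reduced full-image sizes for 1024x768, 512px tiles.
from iiif.static import static_full_sizes

print(list(static_full_sizes(1024, 768, 512)))
# [[256, 192], [128, 96], [64, 48], [32, 24], [16, 12], [8, 6], [4, 3], [2, 2], [1, 1]]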
zimeon/iiif
iiif/static.py
IIIFStatic.parse_extra
def parse_extra(self, extra):
    """Parse extra request parameters to IIIFRequest object."""
    if extra.startswith('/'):
        extra = extra[1:]
    r = IIIFRequest(identifier='dummy',
                    api_version=self.api_version)
    r.parse_url(extra)
    if (r.info):
        raise IIIFStaticError(
            "Attempt to specify Image Information in extras.")
    return(r)
python
Parse extra request parameters to IIIFRequest object.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L191-L200
zimeon/iiif
iiif/static.py
IIIFStatic.get_osd_config
def get_osd_config(self, osd_version):
    """Select appropriate portion of config.

    If the version requested is not supported then raise an exception
    with a helpful error message listing the versions supported.
    """
    if (osd_version in self.osd_config):
        return(self.osd_config[osd_version])
    else:
        raise IIIFStaticError(
            "OpenSeadragon version %s not supported, available versions are %s" %
            (osd_version, ', '.join(sorted(self.osd_config.keys()))))
python
Select appropriate portion of config. If the version requested is not supported then raise an exception with a helpful error message listing the versions supported.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L202-L212
zimeon/iiif
iiif/static.py
IIIFStatic.generate
def generate(self, src=None, identifier=None):
    """Generate static files for one source image."""
    self.src = src
    self.identifier = identifier
    # Get image details and calculate tiles
    im = self.manipulator_klass()
    im.srcfile = self.src
    im.set_max_image_pixels(self.max_image_pixels)
    im.do_first()
    width = im.width
    height = im.height
    scale_factors = im.scale_factors(self.tilesize)
    # Setup destination and IIIF identifier
    self.setup_destination()
    # Write out images
    for (region, size) in static_partial_tile_sizes(width, height,
                                                    self.tilesize,
                                                    scale_factors):
        self.generate_tile(region, size)
    sizes = []
    for size in static_full_sizes(width, height, self.tilesize):
        # See https://github.com/zimeon/iiif/issues/9
        sizes.append({'width': size[0], 'height': size[1]})
        self.generate_tile('full', size)
    for request in self.extras:
        request.identifier = self.identifier
        if (request.is_scaled_full_image()):
            sizes.append({'width': request.size_wh[0],
                          'height': request.size_wh[1]})
        self.generate_file(request)
    # Write info.json
    qualities = ['default'] if (self.api_version > '1.1') else ['native']
    info = IIIFInfo(level=0, server_and_prefix=self.prefix,
                    identifier=self.identifier,
                    width=width, height=height, scale_factors=scale_factors,
                    tile_width=self.tilesize, tile_height=self.tilesize,
                    formats=['jpg'], qualities=qualities, sizes=sizes,
                    api_version=self.api_version)
    json_file = os.path.join(self.dst, self.identifier, 'info.json')
    if (self.dryrun):
        self.logger.warning(
            "dryrun mode, would write the following files:")
        self.logger.warning("%s / %s/%s" %
                            (self.dst, self.identifier, 'info.json'))
    else:
        with open(json_file, 'w') as f:
            f.write(info.as_json())
        self.logger.info("%s / %s/%s" %
                         (self.dst, self.identifier, 'info.json'))
        self.logger.debug("Written %s" % (json_file))
python
Generate static files for one source image.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L214-L261
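A hedged end-to-end sketch: the source path, identifier, and destination below are made-up values, and the constructor keywords are assumptions about IIIFStatic rather than verified API.

# Hypothetical usage: tile one image into /tmp/tiles/starfish/ plus info.json.
from iiif.static import IIIFStatic

sg = IIIFStatic(dst='/tmp/tiles', tilesize=512, api_version='2.0')
sg.generate(src='testimages/starfish.jpg', identifier='starfish')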
zimeon/iiif
iiif/static.py
IIIFStatic.generate_tile
def generate_tile(self, region, size):
    """Generate one tile for the given region and size of this image."""
    r = IIIFRequest(identifier=self.identifier,
                    api_version=self.api_version)
    if (region == 'full'):
        r.region_full = True
    else:
        r.region_xywh = region  # [rx,ry,rw,rh]
    r.size_wh = size  # [sw,sh]
    r.format = 'jpg'
    self.generate_file(r, True)
python
Generate one tile for the given region and size of this image.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L263-L273
zimeon/iiif
iiif/static.py
IIIFStatic.generate_file
def generate_file(self, r, undistorted=False):
    """Generate file for IIIFRequest object r from this image.

    FIXME - Would be nicer to have the test for an undistorted image
    request based on the IIIFRequest object, and then know whether to
    apply canonicalization or not.

    Logically we might use `w,h` instead of the Image API v2.0
    canonical form `w,` if the api_version is 1.x. However, OSD 1.2.1
    and 2.x assume the new canonical form even in the case where the
    API version is declared earlier. Thus, determine whether to use
    the canonical or `w,h` form based solely on the setting of
    osd_version.
    """
    use_canonical = self.get_osd_config(self.osd_version)['use_canonical']
    height = None
    if (undistorted and use_canonical):
        height = r.size_wh[1]
        r.size_wh = [r.size_wh[0], None]  # [sw,sh] -> [sw,]
    path = r.url()
    # Generate...
    if (self.dryrun):
        self.logger.info("%s / %s" % (self.dst, path))
    else:
        m = self.manipulator_klass(api_version=self.api_version)
        try:
            m.derive(srcfile=self.src, request=r,
                     outfile=os.path.join(self.dst, path))
            self.logger.info("%s / %s" % (self.dst, path))
        except IIIFZeroSizeError:
            self.logger.info("%s / %s - zero size, skipped" %
                             (self.dst, path))
            return  # done if zero size
    if (r.region_full and use_canonical and height is not None):
        # In v2.0 of the spec, the canonical URI form `w,` for scaled
        # images of the full region was introduced. This is somewhat at
        # odds with the requirement for `w,h` specified in `sizes` to
        # be available, and has problems of precision with tall narrow
        # images. Hopefully will be fixed in 3.0 but for now symlink
        # the `w,h` form to the `w,` dirs so that requests using the
        # specified `w,h` form also work. See
        # <https://github.com/IIIF/iiif.io/issues/544>
        #
        # FIXME - This is ugly because we duplicate code in
        # iiif.request.url to construct the partial URL
        region_dir = os.path.join(r.quote(r.identifier), "full")
        wh_dir = "%d,%d" % (r.size_wh[0], height)
        wh_path = os.path.join(region_dir, wh_dir)
        wc_dir = "%d," % (r.size_wh[0])
        wc_path = os.path.join(region_dir, wc_dir)
        if (not self.dryrun):
            ln = os.path.join(self.dst, wh_path)
            if (os.path.exists(ln)):
                os.remove(ln)
            os.symlink(wc_dir, ln)
        self.logger.info("%s / %s -> %s" % (self.dst, wh_path, wc_path))
python
Generate file for IIIFRequest object r from this image. FIXME - Would be nicer to have the test for an undistorted image request based on the IIIFRequest object, and then know whether to apply canonicalization or not. Logically we might use `w,h` instead of the Image API v2.0 canonical form `w,` if the api_version is 1.x. However, OSD 1.2.1 and 2.x assume the new canonical form even in the case where the API version is declared earlier. Thus, determine whether to use the canonical or `w,h` form based solely on the setting of osd_version.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L275-L329
zimeon/iiif
iiif/static.py
IIIFStatic.setup_destination
def setup_destination(self):
    """Set up output directory based on self.dst and self.identifier.

    Returns the output directory name on success, raises an exception
    on failure.
    """
    # Do we have a separate identifier?
    if (not self.identifier):
        # No separate identifier specified, split off the last path segment
        # of the source name, strip the extension to get the identifier
        self.identifier = os.path.splitext(os.path.split(self.src)[1])[0]
    # Done if dryrun, else set up self.dst first
    if (self.dryrun):
        return
    if (not self.dst):
        raise IIIFStaticError("No destination directory specified!")
    dst = self.dst
    if (os.path.isdir(dst)):
        # Exists, OK
        pass
    elif (os.path.isfile(dst)):
        raise IIIFStaticError(
            "Can't write to directory %s: a file of that name exists" % dst)
    else:
        os.makedirs(dst)
    # Second, create identifier based subdir if necessary
    outd = os.path.join(dst, self.identifier)
    if (os.path.isdir(outd)):
        # Nothing for now, perhaps should delete?
        self.logger.warning(
            "Output directory %s already exists, adding/updating files" % outd)
    elif (os.path.isfile(outd)):
        raise IIIFStaticError(
            "Can't write to directory %s: a file of that name exists" % outd)
    else:
        os.makedirs(outd)
    self.logger.debug("Output directory %s" % outd)
python
Set up output directory based on self.dst and self.identifier. Returns the output directory name on success, raises an exception on failure.
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L331-L368
zimeon/iiif
iiif/static.py
IIIFStatic.write_html
def write_html(self, html_dir='/tmp', include_osd=False,
               osd_width=500, osd_height=500):
    """Write HTML test page using OpenSeadragon for the tiles generated.

    Assumes that the generate(..) method has already been called
    to set up identifier etc. Parameters:
      html_dir - output directory for HTML files, will be created if
                 it does not already exist
      include_osd - true to include OpenSeadragon code
      osd_width - width of OpenSeadragon pane in pixels
      osd_height - height of OpenSeadragon pane in pixels
    """
    osd_config = self.get_osd_config(self.osd_version)
    osd_base = osd_config['base']
    osd_dir = osd_config['dir']  # relative to base
    osd_js = os.path.join(osd_dir, osd_config['js'])
    osd_images = os.path.join(osd_dir, osd_config['images'])
    if (os.path.isdir(html_dir)):
        # Exists, fine
        pass
    elif (os.path.isfile(html_dir)):
        raise IIIFStaticError(
            "Can't write to directory %s: a file of that name exists" % html_dir)
    else:
        os.makedirs(html_dir)
    self.logger.info("Writing HTML to %s" % (html_dir))
    with open(os.path.join(self.template_dir, 'static_osd.html'), 'r') as f:
        template = f.read()
    outfile = self.identifier + '.html'
    outpath = os.path.join(html_dir, outfile)
    with open(outpath, 'w') as f:
        info_json_uri = '/'.join([self.identifier, 'info.json'])
        if (self.prefix):
            info_json_uri = '/'.join([self.prefix, info_json_uri])
        d = dict(identifier=self.identifier,
                 api_version=self.api_version,
                 osd_version=self.osd_version,
                 osd_uri=osd_js,
                 osd_images_prefix=osd_images,
                 osd_width=osd_width,
                 osd_height=osd_height,
                 info_json_uri=info_json_uri)
        f.write(Template(template).safe_substitute(d))
    self.logger.info("%s / %s" % (html_dir, outfile))
    # Do we want to copy OSD in there too? If so, do it only if
    # we haven't already
    if (include_osd):
        if (self.copied_osd):
            self.logger.info("OpenSeadragon already copied")
        else:
            # Make directory, copy JavaScript and icons (from osd_images)
            osd_path = os.path.join(html_dir, osd_dir)
            if (not os.path.isdir(osd_path)):
                os.makedirs(osd_path)
            shutil.copyfile(os.path.join(osd_base, osd_js),
                            os.path.join(html_dir, osd_js))
            self.logger.info("%s / %s" % (html_dir, osd_js))
            osd_images_path = os.path.join(html_dir, osd_images)
            if (os.path.isdir(osd_images_path)):
                self.logger.warning(
                    "OpenSeadragon images directory (%s) already exists, skipping" % osd_images_path)
            else:
                shutil.copytree(os.path.join(osd_base, osd_images),
                                osd_images_path)
                self.logger.info("%s / %s/*" % (html_dir, osd_images))
            self.copied_osd = True
python
Write HTML test page using OpenSeadragon for the tiles generated. Assumes that the generate(..) method has already been called to set up identifier etc. Parameters: html_dir - output directory for HTML files, will be created if it does not already exist include_osd - true to include OpenSeadragon code osd_width - width of OpenSeadragon pane in pixels osd_height - height of OpenSeadragon pane in pixels
https://github.com/zimeon/iiif/blob/9d10018d01202fa2a76dfa61598dc6eca07b471f/iiif/static.py#L370-L436
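Continuing the hedged IIIFStatic sketch shown after generate() above (the directory is again a made-up value):

# Hypothetical usage: write an OpenSeadragon test page (starfish.html)
# alongside the tiles generated earlier by sg.generate(...).
sg.write_html(html_dir='/tmp/tiles', include_osd=False)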
polyaxon/polyaxon-schemas
polyaxon_schemas/utils.py
get_value
def get_value(key, obj, default=missing):
    """Helper for pulling a keyed value off various types of objects."""
    if isinstance(key, int):
        return _get_value_for_key(key, obj, default)
    return _get_value_for_keys(key.split('.'), obj, default)
python
Helper for pulling a keyed value off various types of objects
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/utils.py#L371-L375
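An illustration of the dotted-key traversal get_value() performs, assuming (as in marshmallow's equivalent utility) that the helper returns the supplied default when a key is absent; the data values are invented.

from polyaxon_schemas.utils import get_value

data = {'params': {'lr': 0.01}}
print(get_value('params.lr', data))                      # -> 0.01
print(get_value('params.momentum', data, default=None))  # -> None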
polyaxon/polyaxon-schemas
polyaxon_schemas/utils.py
UnknownSchemaMixin._handle_load_unknown
def _handle_load_unknown(self, data, original):
    """Preserve unknown keys during deserialization."""
    for key, val in original.items():
        if key not in self.fields:
            data[key] = val
    return data
python
Preserve unknown keys during deserialization.
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/utils.py#L352-L357
polyaxon/polyaxon-schemas
polyaxon_schemas/utils.py
UnknownSchemaMixin._handle_dump_unknown
def _handle_dump_unknown(self, data, original):
    """Preserve unknown keys during serialization."""
    for key, val in original.items():
        if key not in self.fields:
            data[key] = val
    return data
python
Preserve unknown keys during serialization.
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/utils.py#L360-L365
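A standalone sketch of the same pass-through pattern using marshmallow 3 hooks; the schema and field names are invented, and `unknown = EXCLUDE` stands in for older marshmallow's silent dropping of undeclared keys.

from marshmallow import EXCLUDE, Schema, fields, post_load

class PassthroughSchema(Schema):
    """Toy schema: declared field 'name', everything else passed through."""

    class Meta:
        unknown = EXCLUDE  # drop unknown keys from the validated data...

    name = fields.Str()

    @post_load(pass_original=True)
    def _keep_unknown(self, data, original, **kwargs):
        # ...then copy them back untouched, as the mixin hooks above do
        for key, val in original.items():
            if key not in self.fields:
                data[key] = val
        return data

print(PassthroughSchema().load({'name': 'op', 'extra': 1}))
# -> {'name': 'op', 'extra': 1}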
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/libs/validator.py
validate_headers
def validate_headers(spec, data):
    """Validates headers data and creates the config objects."""
    validated_data = {
        spec.VERSION: data[spec.VERSION],
        spec.KIND: data[spec.KIND],
    }
    if data.get(spec.LOGGING):
        validated_data[spec.LOGGING] = LoggingConfig.from_dict(
            data[spec.LOGGING])
    if data.get(spec.TAGS):
        validated_data[spec.TAGS] = data[spec.TAGS]
    if data.get(spec.HP_TUNING):
        validated_data[spec.HP_TUNING] = HPTuningConfig.from_dict(
            data[spec.HP_TUNING])
    return validated_data
python
Validates headers data and creates the config objects
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/libs/validator.py#L16-L34
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/libs/validator.py
validate
def validate(spec, data):
    """Validates the data and creates the config objects."""
    data = copy.deepcopy(data)
    validated_data = {}

    def validate_keys(section, config, section_data):
        if not isinstance(section_data, dict) or section == spec.MODEL:
            return
        extra_args = [key for key in section_data.keys()
                      if key not in config.SCHEMA().fields]
        if extra_args:
            raise PolyaxonfileError(
                'Extra arguments passed for `{}`: {}'.format(
                    section, extra_args))

    def add_validated_section(section, config):
        if data.get(section):
            section_data = data[section]
            validate_keys(section=section, config=config,
                          section_data=section_data)
            validated_data[section] = config.from_dict(section_data)

    add_validated_section(spec.ENVIRONMENT, spec.ENVIRONMENT_CONFIG)
    add_validated_section(spec.BUILD, BuildConfig)
    add_validated_section(spec.RUN, RunConfig)
    add_validated_section(spec.MODEL, ModelConfig)
    add_validated_section(spec.TRAIN, TrainConfig)
    add_validated_section(spec.EVAL, EvalConfig)

    return validated_data
python
Validates the data and creates the config objects
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/libs/validator.py#L37-L64
polyaxon/polyaxon-schemas
polyaxon_schemas/ops/experiment.py
ExperimentSchema.validate_replicas
def validate_replicas(self, data):
    """Validate distributed experiment."""
    environment = data.get('environment')
    if environment and environment.replicas:
        validate_replicas(data.get('framework'), environment.replicas)
python
Validate distributed experiment
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/ops/experiment.py#L89-L93
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/group.py
GroupSpecification.get_experiment_spec
def get_experiment_spec(self, matrix_declaration):
    """Returns an experiment spec for this group spec and the given matrix declaration."""
    parsed_data = Parser.parse(self, self._data, matrix_declaration)
    del parsed_data[self.HP_TUNING]
    validator.validate(spec=self, data=parsed_data)
    return ExperimentSpecification(values=[parsed_data,
                                           {'kind': self._EXPERIMENT}])
python
Returns an experiment spec for this group spec and the given matrix declaration.
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/group.py#L75-L80
polyaxon/polyaxon-schemas
polyaxon_schemas/specs/group.py
GroupSpecification.get_build_spec
def get_build_spec(self):
    """Returns a build spec for this group spec."""
    if BaseSpecification.BUILD not in self._data:
        return None
    return BuildConfig.from_dict(self._data[BaseSpecification.BUILD])
python
Returns a build spec for this group spec.
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/specs/group.py#L82-L86
polyaxon/polyaxon-schemas
polyaxon_schemas/ops/hptuning.py
HPTuningSchema.validate_matrix
def validate_matrix(self, data):
    """Validates matrix data and creates the config objects"""
    is_grid_search = (
        data.get('grid_search') is not None or
        (data.get('grid_search') is None and
         data.get('random_search') is None and
         data.get('hyperband') is None and
         data.get('bo') is None)
    )
    is_bo = data.get('bo') is not None
    validate_matrix(data.get('matrix'),
                    is_grid_search=is_grid_search,
                    is_bo=is_bo)
python
Validates matrix data and creates the config objects
https://github.com/polyaxon/polyaxon-schemas/blob/a5360240316f4bbccfcdcb97a489cab14458277a/polyaxon_schemas/ops/hptuning.py#L371-L381
inorton/junit2html
junit2htmlreport/parser.py
AnchorBase.anchor
def anchor(self):
    """
    Generate a html anchor name
    :return:
    """
    if not self._anchor:
        self._anchor = str(uuid.uuid4())
    return self._anchor
python
Generate a html anchor name :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L24-L31
inorton/junit2html
junit2htmlreport/parser.py
Class.html
def html(self):
    """
    Render this test class as html
    :return:
    """
    cases = [x.html() for x in self.cases]

    return """
    <hr size="2"/>
    <a name="{anchor}">
    <div class="testclass">
        <div>Test Class: {name}</div>
        <div class="testcases">
        {cases}
        </div>
    </div>
    </a>
    """.format(anchor=self.anchor(),
               name=tag.text(self.name),
               count=len(cases),
               cases="".join(cases))
python
Render this test class as html :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L43-L63
inorton/junit2html
junit2htmlreport/parser.py
Property.html
def html(self):
    """
    Render this property as html
    :return:
    """
    return """
    <div class="property"><i>{name}</i><br/>
    <pre>{value}</pre></div>
    """.format(name=tag.text(self.name),
               value=tag.text(self.value))
python
Render this property as html :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L74-L82
inorton/junit2html
junit2htmlreport/parser.py
Case.html
def html(self):
    """
    Render this test case as HTML
    :return:
    """
    failure = ""
    skipped = None
    stdout = tag.text(self.stdout)
    stderr = tag.text(self.stderr)

    if self.skipped:
        skipped = """
        <hr size="1"/>
        <div class="skipped"><b>Skipped: {msg}</b><br/>
            <pre>{skip}</pre>
        </div>
        """.format(msg=tag.text(self.skipped_msg),
                   skip=tag.text(self.skipped))

    if self.failed():
        failure = """
        <hr size="1"/>
        <div class="failure"><b>Failed: {msg}</b><br/>
            <pre>{fail}</pre>
        </div>
        """.format(msg=tag.text(self.failure_msg),
                   fail=tag.text(self.failure))

    properties = [x.html() for x in self.properties]

    return """
    <a name="{anchor}">
    <div class="testcase">
        <div class="details">
            <span class="testname"><b>{testname}</b></span><br/>
            <span class="testclassname">{testclassname}</span><br/>
            <span class="duration">Time Taken: {duration}s</span>
        </div>
        {skipped}
        {failure}
        <hr size="1"/>
        {properties}
        <div class="stdout"><i>Stdout</i><br/>
        <pre>{stdout}</pre></div>
        <hr size="1"/>
        <div class="stderr"><i>Stderr</i><br/>
        <pre>{stderr}</pre></div>
    </div>
    </a>
    """.format(anchor=self.anchor(),
               testname=self.name,
               testclassname=self.testclass.name,
               duration=self.duration,
               failure=failure,
               skipped=skipped,
               properties="".join(properties),
               stdout=stdout,
               stderr=stderr)
python
Render this test case as HTML :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L108-L165
inorton/junit2html
junit2htmlreport/parser.py
Suite.all
def all(self):
    """
    Return all testcases
    :return:
    """
    tests = list()
    for testclass in self.classes:
        tests.extend(self.classes[testclass].cases)
    return tests
python
Return all testcases :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L208-L216
inorton/junit2html
junit2htmlreport/parser.py
Suite.passed
def passed(self):
    """
    Return all the passing testcases
    :return:
    """
    return [test for test in self.all()
            if not test.failed() and not test.skipped()]
python
Return all the passing testcases :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L232-L237
inorton/junit2html
junit2htmlreport/parser.py
Suite.toc
def toc(self):
    """
    Return a html table of contents
    :return:
    """
    fails = ""
    skips = ""

    if len(self.failed()):
        faillist = list()
        for failure in self.failed():
            faillist.append(
                """
                <li>
                    <a href="#{anchor}">{name}</a>
                </li>
                """.format(anchor=failure.anchor(),
                           name=tag.text(
                               failure.testclass.name + '.' + failure.name)))

        fails = """
        <li>Failures
        <ul>{faillist}</ul>
        </li>
        """.format(faillist="".join(faillist))

    if len(self.skipped()):
        skiplist = list()
        for skipped in self.skipped():
            skiplist.append(
                """
                <li>
                    <a href="#{anchor}">{name}</a>
                </li>
                """.format(anchor=skipped.anchor(),
                           name=tag.text(
                               skipped.testclass.name + '.' + skipped.name)))

        skips = """
        <li>Skipped
        <ul>{skiplist}</ul>
        </li>
        """.format(skiplist="".join(skiplist))

    classlist = list()
    for classname in self.classes:
        testclass = self.classes[classname]
        cases = list()
        for testcase in testclass.cases:
            cases.append(
                """
                <li>
                    <a href="#{anchor}">{name}</a>
                </li>
                """.format(anchor=testcase.anchor(),
                           name=tag.text(testcase.name)))

        classlist.append("""
        <li>
            <a href="#{anchor}">{name}</a>
            <ul>
            {cases}
            </ul>
        </li>
        """.format(anchor=testclass.anchor(),
                   name=testclass.name,
                   cases="".join(cases)))

    return """
    <ul>
        {failed}
        {skips}
        <li>All Test Classes
        <ul>{classlist}</ul>
        </li>
    </ul>
    """.format(failed=fails,
               skips=skips,
               classlist="".join(classlist))
python
Return a html table of contents :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L239-L321
inorton/junit2html
junit2htmlreport/parser.py
Suite.html
def html(self):
    """
    Render this as html.
    :return:
    """
    classes = list()

    package = ""
    if self.package is not None:
        package = "Package: " + self.package + "<br/>"

    for classname in self.classes:
        classes.append(self.classes[classname].html())

    errs = ""
    for error in self.errors:
        if not len(errs):
            errs += "<tr><th colspan='2' align='left'>Errors</th></tr>"
        for part in ["type", "message", "text"]:
            if part in error:
                errs += "<tr><td>{}</td><td><pre>{}</pre></td></tr>".format(
                    part, tag.text(error[part]))

    stdio = ""
    if self.stderr or self.stdout:
        stdio += "<tr><th colspan='2' align='left'>Output</th></tr>"
        if self.stderr:
            stdio += "<tr><td>Stderr</td><td><pre>{}</pre></td></tr>".format(
                tag.text(self.stderr))
        if self.stdout:
            stdio += "<tr><td>Stdout</td><td><pre>{}</pre></td></tr>".format(
                tag.text(self.stdout))

    props = ""
    if len(self.properties):
        props += "<table>"
        propnames = sorted(self.properties)
        for prop in propnames:
            props += "<tr><th>{}</th><td>{}</td></tr>".format(
                prop, self.properties[prop])
        props += "</table>"

    return """
    <div class="testsuite">
        <h2>Test Suite: {name}</h2><a name="{anchor}">
        {package}
        {properties}
        <table>
        <tr><th align="left">Duration</th><td align="right">{duration} sec</td></tr>
        <tr><th align="left">Test Cases</th><td align="right">{count}</td></tr>
        <tr><th align="left">Failures</th><td align="right">{fails}</td></tr>
        {errs}
        {stdio}
        </table>
        <a name="toc"></a>
        <h2>Results Index</h2>
        {toc}
        <hr size="2"/>
        <h2>Test Results</h2>
        <div class="testclasses">
        {classes}
        </div>
    </div>
    """.format(name=tag.text(self.name),
               anchor=self.anchor(),
               duration=self.duration,
               errs=errs,
               stdio=stdio,
               toc=self.toc(),
               package=package,
               properties=props,
               classes="".join(classes),
               count=len(self.all()),
               fails=len(self.failed()))
python
Render this as html. :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L323-L396
inorton/junit2html
junit2htmlreport/parser.py
Junit.get_css
def get_css(self): """ Return the content of the css file :return: """ thisdir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(thisdir, self.css), "r") as cssfile: return cssfile.read()
python
def get_css(self): """ Return the content of the css file :return: """ thisdir = os.path.dirname(os.path.abspath(__file__)) with open(os.path.join(thisdir, self.css), "r") as cssfile: return cssfile.read()
Return the content of the css file :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L430-L437
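A minimal sketch of the same package-relative lookup that Junit.get_css performs; read_asset is a hypothetical helper name, not part of junit2html:

import os

def read_asset(relative_name):
    # Resolve the file against this module's directory rather than the
    # current working directory, mirroring get_css above.
    here = os.path.dirname(os.path.abspath(__file__))
    with open(os.path.join(here, relative_name), "r") as fh:
        return fh.read()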
inorton/junit2html
junit2htmlreport/parser.py
Junit.process
def process(self): """ populate the report from the xml :return: """ suites = None if isinstance(self.tree, ET.Element): root = self.tree else: root = self.tree.getroot() if root.tag == "testrun": root = root[0] if root.tag == "testsuite": suites = [root] if root.tag == "testsuites": suites = [x for x in root] assert suites, "could not find test suites in results xml" for suite in suites: cursuite = Suite() self.suites.append(cursuite) cursuite.name = suite.attrib["name"] if "package" in suite.attrib: cursuite.package = suite.attrib["package"] cursuite.duration = float(suite.attrib.get("time", '0').replace(',','')) for element in suite: if element.tag == "error": # top level error? errtag = { "message": element.attrib.get("message", ""), "type": element.attrib.get("type", ""), "text": element.text } cursuite.errors.append(errtag) if element.tag == "system-out": cursuite.stdout = element.text if element.tag == "system-err": cursuite.stderr = element.text if element.tag == "properties": for prop in element: if prop.tag == "property": cursuite.properties[prop.attrib["name"]] = prop.attrib["value"] if element.tag == "testcase": testcase = element if not testcase.attrib.get("classname", None): testcase.attrib["classname"] = NO_CLASSNAME if testcase.attrib["classname"] not in cursuite: testclass = Class() testclass.name = testcase.attrib["classname"] cursuite[testclass.name] = testclass testclass = cursuite[testcase.attrib["classname"]] newcase = Case() newcase.name = testcase.attrib["name"] newcase.testclass = testclass newcase.duration = float(testcase.attrib.get("time", '0').replace(',','')) testclass.cases.append(newcase) # does this test case have any children? for child in testcase: if child.tag == "skipped": newcase.skipped = child.text if "message" in child.attrib: newcase.skipped_msg = child.attrib["message"] elif child.tag == "system-out": newcase.stdout = child.text elif child.tag == "system-err": newcase.stderr = child.text elif child.tag == "failure": newcase.failure = child.text if "message" in child.attrib: newcase.failure_msg = child.attrib["message"] elif child.tag == "error": newcase.failure = child.text if "message" in child.attrib: newcase.failure_msg = child.attrib["message"] elif child.tag == "properties": for property in child: newproperty = Property() newproperty.name = property.attrib["name"] newproperty.value = property.attrib["value"] newcase.properties.append(newproperty)
python
def process(self): """ populate the report from the xml :return: """ suites = None if isinstance(self.tree, ET.Element): root = self.tree else: root = self.tree.getroot() if root.tag == "testrun": root = root[0] if root.tag == "testsuite": suites = [root] if root.tag == "testsuites": suites = [x for x in root] assert suites, "could not find test suites in results xml" for suite in suites: cursuite = Suite() self.suites.append(cursuite) cursuite.name = suite.attrib["name"] if "package" in suite.attrib: cursuite.package = suite.attrib["package"] cursuite.duration = float(suite.attrib.get("time", '0').replace(',','')) for element in suite: if element.tag == "error": # top level error? errtag = { "message": element.attrib.get("message", ""), "type": element.attrib.get("type", ""), "text": element.text } cursuite.errors.append(errtag) if element.tag == "system-out": cursuite.stdout = element.text if element.tag == "system-err": cursuite.stderr = element.text if element.tag == "properties": for prop in element: if prop.tag == "property": cursuite.properties[prop.attrib["name"]] = prop.attrib["value"] if element.tag == "testcase": testcase = element if not testcase.attrib.get("classname", None): testcase.attrib["classname"] = NO_CLASSNAME if testcase.attrib["classname"] not in cursuite: testclass = Class() testclass.name = testcase.attrib["classname"] cursuite[testclass.name] = testclass testclass = cursuite[testcase.attrib["classname"]] newcase = Case() newcase.name = testcase.attrib["name"] newcase.testclass = testclass newcase.duration = float(testcase.attrib.get("time", '0').replace(',','')) testclass.cases.append(newcase) # does this test case have any children? for child in testcase: if child.tag == "skipped": newcase.skipped = child.text if "message" in child.attrib: newcase.skipped_msg = child.attrib["message"] elif child.tag == "system-out": newcase.stdout = child.text elif child.tag == "system-err": newcase.stderr = child.text elif child.tag == "failure": newcase.failure = child.text if "message" in child.attrib: newcase.failure_msg = child.attrib["message"] elif child.tag == "error": newcase.failure = child.text if "message" in child.attrib: newcase.failure_msg = child.attrib["message"] elif child.tag == "properties": for property in child: newproperty = Property() newproperty.name = property.attrib["name"] newproperty.value = property.attrib["value"] newcase.properties.append(newproperty)
populate the report from the xml :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L439-L529
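To illustrate the root-tag dispatch (testrun/testsuite/testsuites) that Junit.process performs, here is a self-contained sketch over a tiny, made-up JUnit XML document; only the element and attribute names come from the function above:

import xml.etree.ElementTree as ET

# A tiny, hypothetical JUnit result used only for illustration.
SAMPLE = """
<testsuites>
  <testsuite name="demo" time="0.1">
    <testcase classname="pkg.TestA" name="test_ok" time="0.05"/>
    <testcase classname="pkg.TestA" name="test_bad" time="0.05">
      <failure message="boom">traceback...</failure>
    </testcase>
  </testsuite>
</testsuites>
"""

root = ET.fromstring(SAMPLE)
# A bare <testsuite> root is wrapped in a list; a <testsuites> root is iterated.
suites = [root] if root.tag == "testsuite" else list(root)
for suite in suites:
    for case in suite.iter("testcase"):
        failed = case.find("failure") is not None or case.find("error") is not None
        print(suite.attrib["name"], case.attrib["name"], "FAIL" if failed else "PASS")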
inorton/junit2html
junit2htmlreport/parser.py
Junit.toc
def toc(self): """ If this report has multiple suite results, make a table of contents listing each suite :return: """ if len(self.suites) > 1: tochtml = "<ul>" for suite in self.suites: tochtml += '<li><a href="#{anchor}">{name}</a></li>'.format( anchor=suite.anchor(), name=tag.text(suite.name)) tochtml += "</ul>" return tochtml else: return ""
python
def toc(self): """ If this report has multiple suite results, make a table of contents listing each suite :return: """ if len(self.suites) > 1: tochtml = "<ul>" for suite in self.suites: tochtml += '<li><a href="#{anchor}">{name}</a></li>'.format( anchor=suite.anchor(), name=tag.text(suite.name)) tochtml += "</ul>" return tochtml else: return ""
If this report has multiple suite results, make a table of contents listing each suite :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L545-L559
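A standalone sketch of the same guard-then-build pattern used by Junit.toc, assuming plain index anchors in place of suite.anchor():

from html import escape

def toc_html(names):
    # Emit a list of intra-page links only when there is more than one suite.
    if len(names) <= 1:
        return ""
    items = "".join('<li><a href="#{0}">{1}</a></li>'.format(i, escape(n))
                    for i, n in enumerate(names))
    return "<ul>" + items + "</ul>"

print(toc_html(["suite one", "suite two"]))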
inorton/junit2html
junit2htmlreport/parser.py
Junit.html
def html(self): """ Render the test suite as an HTML report with links to errors first. :return: """ page = self.get_html_head() page += "<body><h1>Test Report</h1>" page += self.toc() for suite in self.suites: page += suite.html() page += "</body></html>" return page
python
def html(self): """ Render the test suite as an HTML report with links to errors first. :return: """ page = self.get_html_head() page += "<body><h1>Test Report</h1>" page += self.toc() for suite in self.suites: page += suite.html() page += "</body></html>" return page
Render the test suite as an HTML report with links to errors first. :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/parser.py#L561-L574
inorton/junit2html
junit2htmlreport/runner.py
run
def run(args): """ Run this tool :param args: :return: """ (opts, args) = PARSER.parse_args(args) if args else PARSER.parse_args() if not len(args): PARSER.print_usage() sys.exit(1) outfilename = args[0] + ".html" if len(args) > 1: outfilename = args[1] report = parser.Junit(args[0]) html = report.html() with open(outfilename, "wb") as outfile: outfile.write(html.encode('utf-8'))
python
def run(args): """ Run this tool :param args: :return: """ (opts, args) = PARSER.parse_args(args) if args else PARSER.parse_args() if not len(args): PARSER.print_usage() sys.exit(1) outfilename = args[0] + ".html" if len(args) > 1: outfilename = args[1] report = parser.Junit(args[0]) html = report.html() with open(outfilename, "wb") as outfile: outfile.write(html.encode('utf-8'))
Run this tool :param args: :return:
https://github.com/inorton/junit2html/blob/73ff9d84c41b60148e86ce597ef605a0f1976d4b/junit2htmlreport/runner.py#L14-L33
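Programmatic usage mirroring the body of run(); "results.xml" is a hypothetical input path:

from junit2htmlreport import parser

# Parse the report and write the rendered HTML next to the input file.
report = parser.Junit("results.xml")
with open("results.xml.html", "wb") as outfile:
    outfile.write(report.html().encode("utf-8"))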
tompollard/tableone
tableone.py
TableOne._generate_remark_str
def _generate_remark_str(self, end_of_line = '\n'): """ Generate a series of remarks that the user should consider when interpreting the summary statistics. """ warnings = {} msg = '{}'.format(end_of_line) # generate warnings for continuous variables if self._continuous: # highlight far outliers outlier_mask = self.cont_describe.far_outliers > 1 outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index) if outlier_vars: warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars # highlight possible multimodal distributions using hartigan's dip test # -1 values indicate NaN modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05) modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index) if modal_vars: warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars # highlight non normal distributions # -1 values indicate NaN modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001) modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index) if modal_vars: warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars # create the warning string for n,k in enumerate(sorted(warnings)): msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line) return msg
python
def _generate_remark_str(self, end_of_line = '\n'): """ Generate a series of remarks that the user should consider when interpreting the summary statistics. """ warnings = {} msg = '{}'.format(end_of_line) # generate warnings for continuous variables if self._continuous: # highlight far outliers outlier_mask = self.cont_describe.far_outliers > 1 outlier_vars = list(self.cont_describe.far_outliers[outlier_mask].dropna(how='all').index) if outlier_vars: warnings["Warning, Tukey test indicates far outliers in"] = outlier_vars # highlight possible multimodal distributions using hartigan's dip test # -1 values indicate NaN modal_mask = (self.cont_describe.diptest >= 0) & (self.cont_describe.diptest <= 0.05) modal_vars = list(self.cont_describe.diptest[modal_mask].dropna(how='all').index) if modal_vars: warnings["Warning, Hartigan's Dip Test reports possible multimodal distributions for"] = modal_vars # highlight non normal distributions # -1 values indicate NaN modal_mask = (self.cont_describe.normaltest >= 0) & (self.cont_describe.normaltest <= 0.001) modal_vars = list(self.cont_describe.normaltest[modal_mask].dropna(how='all').index) if modal_vars: warnings["Warning, test for normality reports non-normal distributions for"] = modal_vars # create the warning string for n,k in enumerate(sorted(warnings)): msg += '[{}] {}: {}.{}'.format(n+1,k,', '.join(warnings[k]), end_of_line) return msg
Generate a series of remarks that the user should consider when interpreting the summary statistics.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L200-L235
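A reduced sketch of the remark-assembly loop at the end of _generate_remark_str, with a made-up warnings dict:

def remark_string(warnings, end_of_line="\n"):
    # Number the warnings in sorted-key order, one per line.
    msg = end_of_line
    for n, key in enumerate(sorted(warnings)):
        msg += "[{}] {}: {}.{}".format(n + 1, key, ", ".join(warnings[key]),
                                       end_of_line)
    return msg

print(remark_string({"Warning, Tukey test indicates far outliers in": ["age", "bmi"]}))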
tompollard/tableone
tableone.py
TableOne._detect_categorical_columns
def _detect_categorical_columns(self,data): """ Detect categorical columns if they are not specified. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- likely_cat : list List of variables that appear to be categorical. """ # assume all non-numerical and date columns are categorical numeric_cols = set(data._get_numeric_data().columns.values) date_cols = set(data.select_dtypes(include=[np.datetime64]).columns) likely_cat = set(data.columns) - numeric_cols likely_cat = list(likely_cat - date_cols) # check proportion of unique values if numerical for var in data._get_numeric_data().columns: likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05 if likely_flag: likely_cat.append(var) return likely_cat
python
def _detect_categorical_columns(self,data): """ Detect categorical columns if they are not specified. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- likely_cat : list List of variables that appear to be categorical. """ # assume all non-numerical and date columns are categorical numeric_cols = set(data._get_numeric_data().columns.values) date_cols = set(data.select_dtypes(include=[np.datetime64]).columns) likely_cat = set(data.columns) - numeric_cols likely_cat = list(likely_cat - date_cols) # check proportion of unique values if numerical for var in data._get_numeric_data().columns: likely_flag = 1.0 * data[var].nunique()/data[var].count() < 0.05 if likely_flag: likely_cat.append(var) return likely_cat
Detect categorical columns if they are not specified. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- likely_cat : list List of variables that appear to be categorical.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L237-L261
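The same unique-to-count heuristic expressed through public pandas API (select_dtypes instead of the private _get_numeric_data); the toy frame is invented for illustration:

import pandas as pd

df = pd.DataFrame({"sex": ["m", "f", "m", "f"] * 50,
                   "age": range(200),
                   "group": [1, 2] * 100})
# Non-numeric columns, plus numeric columns whose unique/non-null ratio
# falls below 5%, are treated as likely categorical.
numeric = df.select_dtypes(include="number")
likely_cat = [c for c in df.columns if c not in numeric.columns]
likely_cat += [c for c in numeric.columns
               if numeric[c].nunique() / numeric[c].count() < 0.05]
print(likely_cat)  # ['sex', 'group']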
tompollard/tableone
tableone.py
TableOne._std
def _std(self,x): """ Compute standard deviation with ddof degrees of freedom """ return np.nanstd(x.values,ddof=self._ddof)
python
def _std(self,x): """ Compute standard deviation with ddof degrees of freedom """ return np.nanstd(x.values,ddof=self._ddof)
Compute standard deviation with ddof degrees of freedom
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L275-L279
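A quick demonstration of what the ddof argument changes; with ddof=1 this is the usual sample standard deviation, and NaNs are ignored either way:

import numpy as np

x = np.array([1.0, 2.0, 3.0, np.nan])
# ddof=0 gives the population SD, ddof=1 the sample SD.
print(np.nanstd(x, ddof=0), np.nanstd(x, ddof=1))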
tompollard/tableone
tableone.py
TableOne._tukey
def _tukey(self,x,threshold): """ Count outliers according to Tukey's rule. Where Q1 is the lower quartile and Q3 is the upper quartile, an outlier is an observation outside of the range: [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)] k = 1.5 indicates an outlier k = 3.0 indicates an outlier that is "far out" """ vals = x.values[~np.isnan(x.values)] try: q1, q3 = np.percentile(vals, [25, 75]) iqr = q3 - q1 low_bound = q1 - (iqr * threshold) high_bound = q3 + (iqr * threshold) outliers = np.where((vals > high_bound) | (vals < low_bound)) except: outliers = [] return outliers
python
def _tukey(self,x,threshold): """ Count outliers according to Tukey's rule. Where Q1 is the lower quartile and Q3 is the upper quartile, an outlier is an observation outside of the range: [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)] k = 1.5 indicates an outlier k = 3.0 indicates an outlier that is "far out" """ vals = x.values[~np.isnan(x.values)] try: q1, q3 = np.percentile(vals, [25, 75]) iqr = q3 - q1 low_bound = q1 - (iqr * threshold) high_bound = q3 + (iqr * threshold) outliers = np.where((vals > high_bound) | (vals < low_bound)) except: outliers = [] return outliers
Count outliers according to Tukey's rule. Where Q1 is the lower quartile and Q3 is the upper quartile, an outlier is an observation outside of the range: [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)] k = 1.5 indicates an outlier k = 3.0 indicates an outlier that is "far out"
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L311-L332
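A worked example of the Tukey fences computed inside _tukey, on made-up data; with k = 1.5 only the value 40.0 falls outside [Q1 - k(Q3 - Q1), Q3 + k(Q3 - Q1)]:

import numpy as np

vals = np.array([1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 40.0])
q1, q3 = np.percentile(vals, [25, 75])
iqr = q3 - q1
k = 1.5  # 3.0 would flag only the "far out" points
low, high = q1 - k * iqr, q3 + k * iqr
print((vals < low) | (vals > high))  # only 40.0 is flagged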
tompollard/tableone
tableone.py
TableOne._outliers
def _outliers(self,x): """ Compute number of outliers """ outliers = self._tukey(x, threshold = 1.5) return np.size(outliers)
python
def _outliers(self,x): """ Compute number of outliers """ outliers = self._tukey(x, threshold = 1.5) return np.size(outliers)
Compute number of outliers
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L334-L339
tompollard/tableone
tableone.py
TableOne._far_outliers
def _far_outliers(self,x): """ Compute number of "far out" outliers """ outliers = self._tukey(x, threshold = 3.0) return np.size(outliers)
python
def _far_outliers(self,x): """ Compute number of "far out" outliers """ outliers = self._tukey(x, threshold = 3.0) return np.size(outliers)
Compute number of "far out" outliers
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L341-L346
tompollard/tableone
tableone.py
TableOne._t1_summary
def _t1_summary(self,x): """ Compute median [IQR] or mean (Std) for the input series. Parameters ---------- x : pandas Series Series of values to be summarised. """ # set decimal places if isinstance(self._decimals,int): n = self._decimals elif isinstance(self._decimals,dict): try: n = self._decimals[x.name] except: n = 1 else: n = 1 warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n)) if x.name in self._nonnormal: f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n) return f.format(np.nanmedian(x.values), np.nanpercentile(x.values,25), np.nanpercentile(x.values,75)) else: f = '{{:.{}f}} ({{:.{}f}})'.format(n,n) return f.format(np.nanmean(x.values), np.nanstd(x.values,ddof=self._ddof))
python
def _t1_summary(self,x): """ Compute median [IQR] or mean (Std) for the input series. Parameters ---------- x : pandas Series Series of values to be summarised. """ # set decimal places if isinstance(self._decimals,int): n = self._decimals elif isinstance(self._decimals,dict): try: n = self._decimals[x.name] except: n = 1 else: n = 1 warnings.warn('The decimals arg must be an int or dict. Defaulting to {} d.p.'.format(n)) if x.name in self._nonnormal: f = '{{:.{}f}} [{{:.{}f}},{{:.{}f}}]'.format(n,n,n) return f.format(np.nanmedian(x.values), np.nanpercentile(x.values,25), np.nanpercentile(x.values,75)) else: f = '{{:.{}f}} ({{:.{}f}})'.format(n,n) return f.format(np.nanmean(x.values), np.nanstd(x.values,ddof=self._ddof))
Compute median [IQR] or mean (Std) for the input series. Parameters ---------- x : pandas Series Series of values to be summarised.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L348-L376
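The two output formats produced by _t1_summary, shown directly with the same numpy calls; the sample values and n=1 decimal place are arbitrary:

import numpy as np

x = np.array([1.0, 2.0, 2.0, 3.0, 10.0])
n = 1  # decimal places
# Non-normal style: median [Q1,Q3]
print('{:.{n}f} [{:.{n}f},{:.{n}f}]'.format(
    np.nanmedian(x), np.nanpercentile(x, 25), np.nanpercentile(x, 75), n=n))
# -> 2.0 [2.0,3.0]
# Normal style: mean (SD)
print('{:.{n}f} ({:.{n}f})'.format(np.nanmean(x), np.nanstd(x, ddof=1), n=n))
# -> 3.6 (3.6)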
tompollard/tableone
tableone.py
TableOne._create_cont_describe
def _create_cont_describe(self,data): """ Describe the continuous data. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df_cont : pandas DataFrame Summarise the continuous variables. """ aggfuncs = [pd.Series.count,np.mean,np.median,self._std, self._q25,self._q75,min,max,self._t1_summary,self._diptest, self._outliers,self._far_outliers,self._normaltest] # coerce continuous data to numeric cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce') # check all data in each continuous column is numeric bad_cols = cont_data.count() != data[self._continuous].count() bad_cols = cont_data.columns[bad_cols] if len(bad_cols)>0: raise InputError("""The following continuous column(s) have non-numeric values: {}. Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values)) # check for coerced column containing all NaN to warn user for column in cont_data.columns[cont_data.count() == 0]: self._non_continuous_warning(column) if self._groupby: # add the groupby column back cont_data = cont_data.merge(data[[self._groupby]], left_index=True, right_index=True) # group and aggregate data df_cont = pd.pivot_table(cont_data, columns=[self._groupby], aggfunc=aggfuncs) else: # if no groupby, just add single group column df_cont = cont_data.apply(aggfuncs).T df_cont.columns.name = 'overall' df_cont.columns = pd.MultiIndex.from_product([df_cont.columns, ['overall']]) df_cont.index.rename('variable',inplace=True) # remove prefix underscore from column names (e.g. _std -> std) agg_rename = df_cont.columns.levels[0] agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename] df_cont.columns.set_levels(agg_rename, level=0, inplace=True) return df_cont
python
def _create_cont_describe(self,data): """ Describe the continuous data. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df_cont : pandas DataFrame Summarise the continuous variables. """ aggfuncs = [pd.Series.count,np.mean,np.median,self._std, self._q25,self._q75,min,max,self._t1_summary,self._diptest, self._outliers,self._far_outliers,self._normaltest] # coerce continuous data to numeric cont_data = data[self._continuous].apply(pd.to_numeric, errors='coerce') # check all data in each continuous column is numeric bad_cols = cont_data.count() != data[self._continuous].count() bad_cols = cont_data.columns[bad_cols] if len(bad_cols)>0: raise InputError("""The following continuous column(s) have non-numeric values: {}. Either specify the column(s) as categorical or remove the non-numeric values.""".format(bad_cols.values)) # check for coerced column containing all NaN to warn user for column in cont_data.columns[cont_data.count() == 0]: self._non_continuous_warning(column) if self._groupby: # add the groupby column back cont_data = cont_data.merge(data[[self._groupby]], left_index=True, right_index=True) # group and aggregate data df_cont = pd.pivot_table(cont_data, columns=[self._groupby], aggfunc=aggfuncs) else: # if no groupby, just add single group column df_cont = cont_data.apply(aggfuncs).T df_cont.columns.name = 'overall' df_cont.columns = pd.MultiIndex.from_product([df_cont.columns, ['overall']]) df_cont.index.rename('variable',inplace=True) # remove prefix underscore from column names (e.g. _std -> std) agg_rename = df_cont.columns.levels[0] agg_rename = [x[1:] if x[0]=='_' else x for x in agg_rename] df_cont.columns.set_levels(agg_rename, level=0, inplace=True) return df_cont
Describe the continuous data. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df_cont : pandas DataFrame Summarise the continuous variables.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L378-L432
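A toy version of the grouped-aggregation call at the heart of _create_cont_describe; the invented frame shows how passing a list of aggfuncs yields the (aggfunc, group level) column MultiIndex that the surrounding code then relabels:

import numpy as np
import pandas as pd

df = pd.DataFrame({"grp": ["a", "a", "b", "b"],
                   "height": [1.6, 1.7, 1.8, np.nan]})
# With no explicit index, pivot_table uses the value columns as rows,
# so each variable becomes a row and each (aggfunc, group) a column.
summary = pd.pivot_table(df, columns=["grp"],
                         aggfunc=[pd.Series.count, np.mean])
print(summary)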
tompollard/tableone
tableone.py
TableOne._create_cat_describe
def _create_cat_describe(self,data): """ Describe the categorical data. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df_cat : pandas DataFrame Summarise the categorical variables. """ group_dict = {} for g in self._groupbylvls: if self._groupby: d_slice = data.loc[data[self._groupby] == g, self._categorical] else: d_slice = data[self._categorical].copy() # create a dataframe with freq, proportion df = d_slice.copy() # convert type to string to avoid int converted to boolean, avoiding nans for column in df.columns: df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values] df = df.melt().groupby(['variable','value']).size().to_frame(name='freq') df.index.set_names('level', level=1, inplace=True) df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100 # set number of decimal places for percent if isinstance(self._decimals,int): n = self._decimals f = '{{:.{}f}}'.format(n) df['percent'] = df['percent'].astype(float).map(f.format) elif isinstance(self._decimals,dict): df.loc[:,'percent'] = df.apply(self._format_cat, axis=1) else: n = 1 f = '{{:.{}f}}'.format(n) df['percent'] = df['percent'].astype(float).map(f.format) # add n column, listing total non-null values for each variable ct = d_slice.count().to_frame(name='n') ct.index.name = 'variable' df = df.join(ct) # add null count nulls = d_slice.isnull().sum().to_frame(name='isnull') nulls.index.name = 'variable' # only save null count to the first category for each variable # do this by extracting the first category from the df row index levels = df.reset_index()[['variable','level']].groupby('variable').first() # add this category to the nulls table nulls = nulls.join(levels) nulls.set_index('level', append=True, inplace=True) # join nulls to categorical df = df.join(nulls) # add summary column df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')' # add to dictionary group_dict[g] = df df_cat = pd.concat(group_dict,axis=1) # ensure the groups are the 2nd level of the column index if df_cat.columns.nlevels>1: df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0) return df_cat
python
def _create_cat_describe(self,data): """ Describe the categorical data. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df_cat : pandas DataFrame Summarise the categorical variables. """ group_dict = {} for g in self._groupbylvls: if self._groupby: d_slice = data.loc[data[self._groupby] == g, self._categorical] else: d_slice = data[self._categorical].copy() # create a dataframe with freq, proportion df = d_slice.copy() # convert type to string to avoid int converted to boolean, avoiding nans for column in df.columns: df[column] = [str(row) if not pd.isnull(row) else None for row in df[column].values] df = df.melt().groupby(['variable','value']).size().to_frame(name='freq') df.index.set_names('level', level=1, inplace=True) df['percent'] = df['freq'].div(df.freq.sum(level=0),level=0).astype(float)* 100 # set number of decimal places for percent if isinstance(self._decimals,int): n = self._decimals f = '{{:.{}f}}'.format(n) df['percent'] = df['percent'].astype(float).map(f.format) elif isinstance(self._decimals,dict): df.loc[:,'percent'] = df.apply(self._format_cat, axis=1) else: n = 1 f = '{{:.{}f}}'.format(n) df['percent'] = df['percent'].astype(float).map(f.format) # add n column, listing total non-null values for each variable ct = d_slice.count().to_frame(name='n') ct.index.name = 'variable' df = df.join(ct) # add null count nulls = d_slice.isnull().sum().to_frame(name='isnull') nulls.index.name = 'variable' # only save null count to the first category for each variable # do this by extracting the first category from the df row index levels = df.reset_index()[['variable','level']].groupby('variable').first() # add this category to the nulls table nulls = nulls.join(levels) nulls.set_index('level', append=True, inplace=True) # join nulls to categorical df = df.join(nulls) # add summary column df['t1_summary'] = df.freq.map(str) + ' (' + df.percent.map(str) + ')' # add to dictionary group_dict[g] = df df_cat = pd.concat(group_dict,axis=1) # ensure the groups are the 2nd level of the column index if df_cat.columns.nlevels>1: df_cat = df_cat.swaplevel(0, 1, axis=1).sort_index(axis=1,level=0) return df_cat
Describe the categorical data. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df_cat : pandas DataFrame Summarise the categorical variables.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L443-L516
tompollard/tableone
tableone.py
TableOne._create_significance_table
def _create_significance_table(self,data): """ Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc. """ # list features of the variable e.g. matched, paired, n_expected df=pd.DataFrame(index=self._continuous+self._categorical, columns=['continuous','nonnormal','min_observed','pval','ptest']) df.index.rename('variable', inplace=True) df['continuous'] = np.where(df.index.isin(self._continuous),True,False) df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False) # list values for each variable, grouped by groupby levels for v in df.index: is_continuous = df.loc[v]['continuous'] is_categorical = ~df.loc[v]['continuous'] is_normal = ~df.loc[v]['nonnormal'] # if continuous, group data into list of lists if is_continuous: catlevels = None grouped_data = [] for s in self._groupbylvls: lvl_data = data.loc[data[self._groupby]==s, v] # coerce to numeric and drop non-numeric data lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna() # append to overall group data grouped_data.append(lvl_data.values) min_observed = len(min(grouped_data,key=len)) # if categorical, create contingency table elif is_categorical: catlevels = sorted(data[v].astype('category').cat.categories) grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v]) min_observed = grouped_data.sum(axis=1).min() # minimum number of observations across all levels df.loc[v,'min_observed'] = min_observed # compute pvalues df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v, grouped_data,is_continuous,is_categorical, is_normal,min_observed,catlevels) return df
python
def _create_significance_table(self,data): """ Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc. """ # list features of the variable e.g. matched, paired, n_expected df=pd.DataFrame(index=self._continuous+self._categorical, columns=['continuous','nonnormal','min_observed','pval','ptest']) df.index.rename('variable', inplace=True) df['continuous'] = np.where(df.index.isin(self._continuous),True,False) df['nonnormal'] = np.where(df.index.isin(self._nonnormal),True,False) # list values for each variable, grouped by groupby levels for v in df.index: is_continuous = df.loc[v]['continuous'] is_categorical = ~df.loc[v]['continuous'] is_normal = ~df.loc[v]['nonnormal'] # if continuous, group data into list of lists if is_continuous: catlevels = None grouped_data = [] for s in self._groupbylvls: lvl_data = data.loc[data[self._groupby]==s, v] # coerce to numeric and drop non-numeric data lvl_data = lvl_data.apply(pd.to_numeric, errors='coerce').dropna() # append to overall group data grouped_data.append(lvl_data.values) min_observed = len(min(grouped_data,key=len)) # if categorical, create contingency table elif is_categorical: catlevels = sorted(data[v].astype('category').cat.categories) grouped_data = pd.crosstab(data[self._groupby].rename('_groupby_var_'),data[v]) min_observed = grouped_data.sum(axis=1).min() # minimum number of observations across all levels df.loc[v,'min_observed'] = min_observed # compute pvalues df.loc[v,'pval'],df.loc[v,'ptest'] = self._p_test(v, grouped_data,is_continuous,is_categorical, is_normal,min_observed,catlevels) return df
Create a table containing p-values for significance tests. Add features of the distributions and the p-values to the dataframe. Parameters ---------- data : pandas DataFrame The input dataset. Returns ---------- df : pandas DataFrame A table containing the p-values, test name, etc.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L518-L572
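The contingency-table step of the categorical branch, on an invented frame; pd.crosstab produces the grouped counts that are handed to whichever test _p_test selects, and whose row sums give min_observed:

import pandas as pd

df = pd.DataFrame({"grp": ["a", "a", "b", "b", "b"],
                   "smoker": ["y", "n", "y", "y", "n"]})
observed = pd.crosstab(df["grp"], df["smoker"])
print(observed)
print(observed.sum(axis=1).min())  # min_observed across groupby levels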
tompollard/tableone
tableone.py
TableOne._create_cont_table
def _create_cont_table(self,data): """ Create tableone for continuous data. Returns ---------- table : pandas DataFrame A table summarising the continuous variables. """ # remove the t1_summary level table = self.cont_describe[['t1_summary']].copy() table.columns = table.columns.droplevel(level=0) # add a column of null counts as 1-count() from previous function nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull') try: table = table.join(nulltable) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(nulltable) # add an empty level column, for joining with cat table table['level'] = '' table.set_index([table.index,'level'],inplace=True) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
python
def _create_cont_table(self,data): """ Create tableone for continuous data. Returns ---------- table : pandas DataFrame A table summarising the continuous variables. """ # remove the t1_summary level table = self.cont_describe[['t1_summary']].copy() table.columns = table.columns.droplevel(level=0) # add a column of null counts as 1-count() from previous function nulltable = data[self._continuous].isnull().sum().to_frame(name='isnull') try: table = table.join(nulltable) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(nulltable) # add an empty level column, for joining with cat table table['level'] = '' table.set_index([table.index,'level'],inplace=True) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
Create tableone for continuous data. Returns ---------- table : pandas DataFrame A table summarising the continuous variables.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L642-L673
tompollard/tableone
tableone.py
TableOne._create_cat_table
def _create_cat_table(self,data): """ Create table one for categorical data. Returns ---------- table : pandas DataFrame A table summarising the categorical variables. """ table = self.cat_describe['t1_summary'].copy() # add the total count of null values across all levels isnull = data[self._categorical].isnull().sum().to_frame(name='isnull') isnull.index.rename('variable', inplace=True) try: table = table.join(isnull) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(isnull) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
python
def _create_cat_table(self,data): """ Create table one for categorical data. Returns ---------- table : pandas DataFrame A table summarising the categorical variables. """ table = self.cat_describe['t1_summary'].copy() # add the total count of null values across all levels isnull = data[self._categorical].isnull().sum().to_frame(name='isnull') isnull.index.rename('variable', inplace=True) try: table = table.join(isnull) except TypeError: # if columns form a CategoricalIndex, need to convert to string first table.columns = table.columns.astype(str) table = table.join(isnull) # add pval column if self._pval and self._pval_adjust: table = table.join(self._significance_table[['pval (adjusted)','ptest']]) elif self._pval: table = table.join(self._significance_table[['pval','ptest']]) return table
Create table one for categorical data. Returns ---------- table : pandas DataFrame A table summarising the categorical variables.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L675-L700
tompollard/tableone
tableone.py
TableOne._create_tableone
def _create_tableone(self, data):
    """
    Create table 1 by combining the continuous and categorical tables.

    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22, where concat has no `sort` keyword
        try:
            table = pd.concat([self.cont_table, self.cat_table], sort=False)
        except TypeError:
            table = pd.concat([self.cont_table, self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table

    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'

    # sort the table rows; rebind the result, since set_index(inplace=True)
    # on the temporary returned by reset_index() would leave `table` unchanged
    table = table.reset_index().set_index(['variable', 'level'])
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument
        new_index = sorted(table.index.values, key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)

    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        for v, _ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array = np.empty((len(new_index),), dtype=object)
            new_index_array[:] = [tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)

    # insert an n (row count) row
    n_row = pd.DataFrame(columns=['variable', 'level', 'isnull'])
    n_row.set_index(['variable', 'level'], inplace=True)
    n_row.loc['n', ''] = None

    # support pandas<=0.22, where concat has no `sort` keyword
    try:
        table = pd.concat([n_row, table], sort=False)
    except TypeError:
        table = pd.concat([n_row, table])

    if self._groupbylvls == ['overall']:
        table.loc['n', 'overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby] == g].count()
            table.loc['n', g] = ct

    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval', 'pval (adjusted)', 'ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')

    # remove empty column added above
    table.drop([''], axis=1, inplace=True)

    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull', axis=1, inplace=True)

    # replace nans with empty strings
    table.fillna('', inplace=True)

    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])

    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)

    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)

    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']

    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]

    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)

    return table
python
def _create_tableone(self, data):
    """
    Create table 1 by combining the continuous and categorical tables.

    Returns
    ----------
    table : pandas DataFrame
        The complete table one.
    """
    if self._continuous and self._categorical:
        # support pandas<=0.22, where concat has no `sort` keyword
        try:
            table = pd.concat([self.cont_table, self.cat_table], sort=False)
        except TypeError:
            table = pd.concat([self.cont_table, self.cat_table])
    elif self._continuous:
        table = self.cont_table
    elif self._categorical:
        table = self.cat_table

    # round pval column and convert to string
    if self._pval and self._pval_adjust:
        table['pval (adjusted)'] = table['pval (adjusted)'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval (adjusted)'] == '0.000', 'pval (adjusted)'] = '<0.001'
    elif self._pval:
        table['pval'] = table['pval'].apply('{:.3f}'.format).astype(str)
        table.loc[table['pval'] == '0.000', 'pval'] = '<0.001'

    # sort the table rows; rebind the result, since set_index(inplace=True)
    # on the temporary returned by reset_index() would leave `table` unchanged
    table = table.reset_index().set_index(['variable', 'level'])
    if self._sort:
        # alphabetical
        new_index = sorted(table.index.values)
    else:
        # sort by the columns argument
        new_index = sorted(table.index.values, key=lambda x: self._columns.index(x[0]))
    table = table.reindex(new_index)

    # if a limit has been set on the number of categorical variables
    # then re-order the variables by frequency
    if self._limit:
        levelcounts = data[self._categorical].nunique()
        levelcounts = levelcounts[levelcounts >= self._limit]
        for v, _ in levelcounts.iteritems():
            count = data[v].value_counts().sort_values(ascending=False)
            new_index = [(v, i) for i in count.index]
            # restructure to match orig_index
            new_index_array = np.empty((len(new_index),), dtype=object)
            new_index_array[:] = [tuple(i) for i in new_index]
            orig_index = table.index.values.copy()
            orig_index[table.index.get_loc(v)] = new_index_array
            table = table.reindex(orig_index)

    # insert an n (row count) row
    n_row = pd.DataFrame(columns=['variable', 'level', 'isnull'])
    n_row.set_index(['variable', 'level'], inplace=True)
    n_row.loc['n', ''] = None

    # support pandas<=0.22, where concat has no `sort` keyword
    try:
        table = pd.concat([n_row, table], sort=False)
    except TypeError:
        table = pd.concat([n_row, table])

    if self._groupbylvls == ['overall']:
        table.loc['n', 'overall'] = len(data.index)
    else:
        for g in self._groupbylvls:
            ct = data[self._groupby][data[self._groupby] == g].count()
            table.loc['n', g] = ct

    # only display data in first level row
    dupe_mask = table.groupby(level=[0]).cumcount().ne(0)
    dupe_columns = ['isnull']
    optional_columns = ['pval', 'pval (adjusted)', 'ptest']
    for col in optional_columns:
        if col in table.columns.values:
            dupe_columns.append(col)
    table[dupe_columns] = table[dupe_columns].mask(dupe_mask).fillna('')

    # remove empty column added above
    table.drop([''], axis=1, inplace=True)

    # remove isnull column if not needed
    if not self._isnull:
        table.drop('isnull', axis=1, inplace=True)

    # replace nans with empty strings
    table.fillna('', inplace=True)

    # add column index
    if not self._groupbylvls == ['overall']:
        # rename groupby variable if requested
        c = self._groupby
        if self._alt_labels:
            if self._groupby in self._alt_labels:
                c = self._alt_labels[self._groupby]
        c = 'Grouped by {}'.format(c)
        table.columns = pd.MultiIndex.from_product([[c], table.columns])

    # display alternative labels if assigned
    table.rename(index=self._create_row_labels(), inplace=True, level=0)

    # if a limit has been set on the number of categorical variables
    # limit the number of categorical variables that are displayed
    if self._limit:
        table = table.groupby('variable').head(self._limit)

    # re-order the columns in a consistent fashion
    if self._groupby:
        cols = table.columns.levels[1].values
    else:
        cols = table.columns.values
    if 'isnull' in cols:
        cols = ['isnull'] + [x for x in cols if x != 'isnull']

    # iterate through each optional column
    # if they exist, put them at the end of the dataframe
    # ensures the last 3 columns will be in the same order as optional_columns
    for col in optional_columns:
        if col in cols:
            cols = [x for x in cols if x != col] + [col]

    if self._groupby:
        table = table.reindex(cols, axis=1, level=1)
    else:
        table = table.reindex(cols, axis=1)

    return table
Create table 1 by combining the continuous and categorical tables. Returns ---------- table : pandas DataFrame The complete table one.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L702-L834
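The row-sorting step above is a known pandas pitfall worth isolating: an inplace method called on the temporary returned by reset_index() mutates the copy, not the original, which is why the reconstruction rebinds the result:

import pandas as pd

df = pd.DataFrame({"variable": ["x"], "level": [""], "overall": [1]})
# Pitfall: inplace=True on a temporary mutates the copy returned by
# reset_index(), so df itself is left unchanged.
df.reset_index().set_index(["variable", "level"], inplace=True)  # no effect on df
df = df.set_index(["variable", "level"])                         # rebinding works
print(df.index.names)  # ['variable', 'level']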
tompollard/tableone
tableone.py
TableOne._create_row_labels
def _create_row_labels(self): """ Take the original labels for rows. Rename if alternative labels are provided. Append label suffix if label_suffix is True. Returns ---------- labels : dictionary Dictionary, keys are original column name, values are final label. """ # start with the original column names labels = {} for c in self._columns: labels[c] = c # replace column names with alternative names if provided if self._alt_labels: for k in self._alt_labels.keys(): labels[k] = self._alt_labels[k] # append the label suffix if self._label_suffix: for k in labels.keys(): if k in self._nonnormal: labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]") elif k in self._categorical: labels[k] = "{}, {}".format(labels[k],"n (%)") else: labels[k] = "{}, {}".format(labels[k],"mean (SD)") return labels
python
def _create_row_labels(self): """ Take the original labels for rows. Rename if alternative labels are provided. Append label suffix if label_suffix is True. Returns ---------- labels : dictionary Dictionary, keys are original column name, values are final label. """ # start with the original column names labels = {} for c in self._columns: labels[c] = c # replace column names with alternative names if provided if self._alt_labels: for k in self._alt_labels.keys(): labels[k] = self._alt_labels[k] # append the label suffix if self._label_suffix: for k in labels.keys(): if k in self._nonnormal: labels[k] = "{}, {}".format(labels[k],"median [Q1,Q3]") elif k in self._categorical: labels[k] = "{}, {}".format(labels[k],"n (%)") else: labels[k] = "{}, {}".format(labels[k],"mean (SD)") return labels
Take the original labels for rows. Rename if alternative labels are provided. Append label suffix if label_suffix is True. Returns ---------- labels : dictionary Dictionary, keys are original column name, values are final label.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/tableone.py#L836-L867
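A compact re-expression of _create_row_labels as a free function, with invented example arguments:

def row_labels(columns, alt_labels, nonnormal, categorical):
    # Start from the raw names, apply any renames, then append a summary hint.
    labels = {c: alt_labels.get(c, c) for c in columns}
    for k in labels:
        if k in nonnormal:
            suffix = "median [Q1,Q3]"
        elif k in categorical:
            suffix = "n (%)"
        else:
            suffix = "mean (SD)"
        labels[k] = "{}, {}".format(labels[k], suffix)
    return labels

print(row_labels(["age", "sex"], {"sex": "Sex"}, [], ["sex"]))
# {'age': 'age, mean (SD)', 'sex': 'Sex, n (%)'}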
tompollard/tableone
modality.py
dip_pval_tabinterpol
def dip_pval_tabinterpol(dip, N):
    '''
    dip - dip value computed from dip_from_cdf
    N - number of observations
    '''

    # if qDiptab_df is None:
    #     raise DataError("Tabulated p-values not available. See installation instructions.")

    if np.isnan(N) or N < 10:
        return np.nan

    qDiptab_dict = {
        '0': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0625, 9: 0.0555555555555556, 10: 0.05, 15: 0.0341378172277919, 20: 0.033718563622065004, 30: 0.0262674485075642, 50: 0.0218544781364545, 100: 0.0164852597438403, 200: 0.0111236388849688, 500: 0.007554885975761959, 1000: 0.00541658127872122, 2000: 0.0039043999745055702, 5000: 0.00245657785440433, 10000: 0.00174954269199566, 20000: 0.00119458814106091, 40000: 0.000852415648011777, 72000: 0.000644400053256997},
        '0.01': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0625, 9: 0.0613018090298924, 10: 0.0610132555623269, 15: 0.0546284208048975, 20: 0.0474333740698401, 30: 0.0395871890405749, 50: 0.0314400501999916, 100: 0.022831985803043, 200: 0.0165017735429825, 500: 0.0106403461127515, 1000: 0.0076028674530018705, 2000: 0.0054166418179658294, 5000: 0.0034480928223332603, 10000: 0.00244595133885302, 20000: 0.00173435346896287, 40000: 0.00122883479310665, 72000: 0.000916872204484283},
        '0.02': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0656911994503283, 9: 0.0658615858179315, 10: 0.0651627333214016, 15: 0.0572191260231815, 20: 0.0490891387627092, 30: 0.0414574606741673, 50: 0.0329008160470834, 100: 0.0238917486442849, 200: 0.0172594157992489, 500: 0.0111255573208294, 1000: 0.00794987834644799, 2000: 0.0056617138625232296, 5000: 0.00360473943713036, 10000: 0.00255710802275612, 20000: 0.0018119443458468102, 40000: 0.0012846930445701802, 72000: 0.0009579329467655321},
        '0.05': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0725717816250742, 8: 0.0738651136071762, 9: 0.0732651142535317, 10: 0.0718321619656165, 15: 0.0610087367689692, 20: 0.052719998201553, 30: 0.0444462614069956, 50: 0.0353023819040016, 100: 0.0256559537977579, 200: 0.0185259426032926, 500: 0.0119353655328931, 1000: 0.0085216518343594, 2000: 0.00607120971135229, 5000: 0.0038632654801084897, 10000: 0.00273990955227265, 20000: 0.00194259470485893, 40000: 0.0013761765052555301, 72000: 0.00102641863872347},
        '0.1': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0817315478539489, 8: 0.0820045917762512, 9: 0.0803941629593475, 10: 0.077966212182459, 15: 0.0642657137330444, 20: 0.0567795509056742, 30: 0.0473998525042686, 50: 0.0377279973102482, 100: 0.0273987414570948, 200: 0.0197917612637521, 500: 0.0127411306411808, 1000: 0.00909775605533253, 2000: 0.0064762535755248, 5000: 0.00412089506752692, 10000: 0.0029225480567908, 20000: 0.00207173719623868, 40000: 0.0014675150200632301, 72000: 0.0010949515421800199},
        '0.2': {4: 0.125, 5: 0.1, 6: 0.0924514470941933, 7: 0.0940590181922527, 8: 0.0922700601131892, 9: 0.0890432420913848, 10: 0.0852835359834564, 15: 0.0692234107989591, 20: 0.0620134674468181, 30: 0.0516677370374349, 50: 0.0410699984399582, 100: 0.0298109370830153, 200: 0.0215233745778454, 500: 0.0138524542751814, 1000: 0.00988924521014078, 2000: 0.00703573098590029, 5000: 0.00447640050137479, 10000: 0.00317374638422465, 20000: 0.00224993202086955, 40000: 0.00159376453672466, 72000: 0.00118904090369415},
        '0.3': {4: 0.125, 5: 0.1, 6: 0.103913431059949, 7: 0.10324449080087102, 8: 0.0996737189599363, 9: 0.0950811420297928, 10: 0.0903204173707099, 15: 0.0745462114365167, 20: 0.0660163872069048, 30: 0.0551037519001622, 50: 0.0437704598622665, 100: 0.0317771496530253, 200: 0.0229259769870428, 500: 0.0147536004288476, 1000: 0.0105309297090482, 2000: 0.007494212545892991, 5000: 0.00476555693102276, 10000: 0.00338072258533527, 20000: 0.00239520831473419, 40000: 0.00169668445506151, 72000: 0.00126575197699874},
        '0.4': {4: 0.125, 5: 0.10872059357632902, 6: 0.113885220640212, 7: 0.110964599995697, 8: 0.10573353180273701, 9: 0.0999380897811046, 10: 0.0943334983745117, 15: 0.0792030878981762, 20: 0.0696506075066401, 30: 0.058265005347492994, 50: 0.0462925642671299, 100: 0.0336073821590387, 200: 0.024243848341112, 500: 0.0155963185751048, 1000: 0.0111322726797384, 2000: 0.007920878896017329, 5000: 0.005037040297500721, 10000: 0.0035724387653598205, 20000: 0.00253036792824665, 40000: 0.0017925341833790601, 72000: 0.00133750966361506},
        '0.5': {4: 0.125, 5: 0.12156379802641401, 6: 0.123071187137781, 7: 0.11780784650433501, 8: 0.11103512984770501, 9: 0.10415356007586801, 10: 0.0977817630384725, 15: 0.083621033469191, 20: 0.0733437740592714, 30: 0.0614510857304343, 50: 0.048851155289608, 100: 0.0354621760592113, 200: 0.025584358256487003, 500: 0.0164519238025286, 1000: 0.0117439009052552, 2000: 0.008355737247680059, 5000: 0.0053123924740821294, 10000: 0.00376734715752209, 20000: 0.00266863168718114, 40000: 0.00189061261635977, 72000: 0.00141049709228472},
        '0.6': {4: 0.125, 5: 0.134318918697053, 6: 0.13186973390253, 7: 0.124216086833531, 8: 0.11592005574998801, 9: 0.10800780236193198, 10: 0.102180866696628, 15: 0.0881198482202905, 20: 0.0776460662880254, 30: 0.0649164408053978, 50: 0.0516145897865757, 100: 0.0374805844550272, 200: 0.0270252129816288, 500: 0.017383057902553, 1000: 0.012405033293814, 2000: 0.00882439333812351, 5000: 0.00560929919359959, 10000: 0.00397885007249132, 20000: 0.0028181999035216, 40000: 0.00199645471886179, 72000: 0.00148936709298802},
        '0.7': {4: 0.13255954878268902, 5: 0.14729879897625198, 6: 0.140564796497941, 7: 0.130409013968317, 8: 0.120561479262465, 9: 0.112512617124951, 10: 0.10996094814295099, 15: 0.093124666680253, 20: 0.0824558407118372, 30: 0.0689178762425442, 50: 0.0548121932066019, 100: 0.0398046179116599, 200: 0.0286920262150517, 500: 0.0184503949887735, 1000: 0.0131684179320803, 2000: 0.009367858207170609, 5000: 0.00595352728377949, 10000: 0.00422430013176233, 20000: 0.00299137548142077, 40000: 0.00211929748381704, 72000: 0.00158027541945626},
        '0.8': {4: 0.15749736904023498, 5: 0.161085025702604, 6: 0.14941924112913002, 7: 0.136639642123068, 8: 0.125558759034845, 9: 0.12291503348081699, 10: 0.11884476721158699, 15: 0.0996694393390689, 20: 0.08834462700173701, 30: 0.0739249074078291, 50: 0.0588230482851366, 100: 0.0427283846799166, 200: 0.0308006766341406, 500: 0.0198162679782071, 1000: 0.0141377942603047, 2000: 0.01005604603884, 5000: 0.00639092280563517, 10000: 0.00453437508148542, 20000: 0.00321024899920135, 40000: 0.0022745769870358102, 72000: 0.00169651643860074},
        '0.9': {4: 0.18740187880755899, 5: 0.176811998476076, 6: 0.159137064572627, 7: 0.144240669035124, 8: 0.141841067033899, 9: 0.136412639387084, 10: 0.130462149644819, 15: 0.11008749690090598, 20: 0.0972346018122903, 30: 0.0814791379390127, 50: 0.0649136324046767, 100: 0.047152783315718, 200: 0.0339967814293504, 500: 0.0218781313182203, 1000: 0.0156148055023058, 2000: 0.0111019116837591, 5000: 0.00705566126234625, 10000: 0.00500178808402368, 20000: 0.00354362220314155, 40000: 0.00250999080890397, 72000: 0.0018730618472582602},
        '0.95': {4: 0.20726978858735998, 5: 0.18639179602794398, 6: 0.164769608513302, 7: 0.159903395678336, 8: 0.153978303998561, 9: 0.14660378495401902, 10: 0.139611395137099, 15: 0.118760769203664, 20: 0.105130218270636, 30: 0.0881689143126666, 50: 0.0702737877191269, 100: 0.0511279442868827, 200: 0.0368418413878307, 500: 0.0237294742633411, 1000: 0.0169343970067564, 2000: 0.0120380990328341, 5000: 0.0076506368153935, 10000: 0.00542372242836395, 20000: 0.00384330190244679, 40000: 0.00272375073486223, 72000: 0.00203178401610555},
        '0.98': {4: 0.22375580462922195, 5: 0.19361253363045, 6: 0.17917654739278197, 7: 0.17519655327122302, 8: 0.16597856724751, 9: 0.157084065653166, 10: 0.150961728882481, 15: 0.128890475210055, 20: 0.11430970428125302, 30: 0.0960564383013644, 50: 0.0767095886079179, 100: 0.0558022052195208, 200: 0.0402729850316397, 500: 0.025919578977657003, 1000: 0.018513067368104, 2000: 0.0131721010552576, 5000: 0.00836821687047215, 10000: 0.00592656681022859, 20000: 0.00420258799378253, 40000: 0.00298072958568387, 72000: 0.00222356097506054},
        '0.99': {4: 0.231796258864192, 5: 0.19650913979884502, 6: 0.191862827995563, 7: 0.184118659121501, 8: 0.172988528276759, 9: 0.164164643657217, 10: 0.159684158858235, 15: 0.13598356863636, 20: 0.120624043335821, 30: 0.101478558893837, 50: 0.0811998415355918, 100: 0.059024132304226, 200: 0.0426864799777448, 500: 0.0274518022761997, 1000: 0.0196080260483234, 2000: 0.0139655122281969, 5000: 0.00886357892854914, 10000: 0.00628034732880374, 20000: 0.00445774902155711, 40000: 0.00315942194040388, 72000: 0.00235782814777627},
        '0.995': {4: 0.23726374382677898, 5: 0.198159967287576, 6: 0.20210197104296804, 7: 0.19101439617430602, 8: 0.179010413496374, 9: 0.172821674582338, 10: 0.16719524735674, 15: 0.14245248368127697, 20: 0.126552378036739, 30: 0.10650487144103, 50: 0.0852854646662134, 100: 0.0620425065165146, 200: 0.044958959158761, 500: 0.0288986369564301, 1000: 0.0206489568587364, 2000: 0.0146889122204488, 5000: 0.00934162787186159, 10000: 0.00661030641550873, 20000: 0.00469461513212743, 40000: 0.0033273652798148, 72000: 0.00248343580127067},
        '0.998': {4: 0.241992892688593, 5: 0.19924427936243302, 6: 0.213015781111186, 7: 0.198216795232182, 8: 0.186504388711178, 9: 0.182555283567818, 10: 0.175419540856082, 15: 0.15017281653074202, 20: 0.13360135382395, 30: 0.112724636524262, 50: 0.0904847827490294, 100: 0.0658016011466099, 200: 0.0477643873749449, 500: 0.0306813505050163, 1000: 0.0219285176765082, 2000: 0.0156076779647454, 5000: 0.009932186363240291, 10000: 0.00702254699967648, 20000: 0.004994160691291679, 40000: 0.00353988965698579, 72000: 0.00264210826339498},
        '0.999': {4: 0.244369839049632, 5: 0.199617527406166, 6: 0.219518627282415, 7: 0.20234101074826102, 8: 0.19448404115794, 9: 0.188658833121906, 10: 0.180611195797351, 15: 0.15545613369632802, 20: 0.138569903791767, 30: 0.117164140184417, 50: 0.0940930106666244, 100: 0.0684479731118028, 200: 0.0497198001867437, 500: 0.0320170996823189, 1000: 0.0228689168972669, 2000: 0.0162685615996248, 5000: 0.0103498795291629, 10000: 0.0073182262815645795, 20000: 0.00520917757743218, 40000: 0.00369400045486625, 72000: 0.0027524322157581},
        '0.9995': {4: 0.245966625504691, 5: 0.19980094149902802, 6: 0.22433904739444602, 7: 0.205377566346832, 8: 0.200864297005026, 9: 0.19408912076824603, 10: 0.18528641605039603, 15: 0.160896499106958, 20: 0.14336916123968, 30: 0.12142585990898701, 50: 0.0974904344916743, 100: 0.0709169443994193, 200: 0.0516114611801451, 500: 0.0332452747332959, 1000: 0.023738710122235003, 2000: 0.0168874937789415, 5000: 0.0107780907076862, 10000: 0.0076065423418208, 20000: 0.005403962359243721, 40000: 0.00383345715372182, 72000: 0.0028608570740143},
        '0.9998': {4: 0.24743959723326198, 5: 0.19991708183427104, 6: 0.22944933215424101, 7: 0.208306562526874, 8: 0.20884999705022897, 9: 0.19915700809389003, 10: 0.19120308390504398, 15: 0.16697940794624802, 20: 0.148940116394883, 30: 0.126733051889401, 50: 0.10228420428399698, 100: 0.0741183486081263, 200: 0.0540543978864652, 500: 0.0348335698576168, 1000: 0.0248334158891432, 2000: 0.0176505093388153, 5000: 0.0113184316868283, 10000: 0.00795640367207482, 20000: 0.00564540201704594, 40000: 0.0040079346963469605, 72000: 0.00298695044508003},
        '0.9999': {4: 0.24823065965663801, 5: 0.19995902909307503, 6: 0.232714530449602, 7: 0.209866047852379, 8: 0.212556040406219, 9: 0.20288159843655804, 10: 0.19580515933918397, 15: 0.17111793515551002, 20: 0.152832538183622, 30: 0.131198578897542, 50: 0.104680624334611, 100: 0.0762579402903838, 200: 0.0558704526182638, 500: 0.0359832389317461, 1000: 0.0256126573433596, 2000: 0.0181944265400504, 5000: 0.0117329446468571, 10000: 0.0082270524584354, 20000: 0.00580460792299214, 40000: 0.00414892737222885, 72000: 0.00309340092038059},
        '0.99995': {4: 0.248754269146416, 5: 0.19997839537608197, 6: 0.236548128358969, 7: 0.21096757693345103, 8: 0.21714917413729898, 9: 0.205979795735129, 10: 0.20029398089673, 15: 0.17590050570443203, 20: 0.15601016361897102, 30: 0.133691739483444, 50: 0.107496694235039, 100: 0.0785735967934979, 200: 0.0573877056330228, 500: 0.0369051995840645, 1000: 0.0265491336936829, 2000: 0.0186226037818523, 5000: 0.0119995948968375, 10000: 0.00852240989786251, 20000: 0.00599774739593151, 40000: 0.0042839159079761, 72000: 0.00319932767198801},
        '0.99998': {4: 0.24930203997425898, 5: 0.199993151405815, 6: 0.2390887911995, 7: 0.212233348558702, 8: 0.22170007640450304, 9: 0.21054115498898, 10: 0.20565108964621898, 15: 0.18185667601316602, 20: 0.16131922583934502, 30: 0.137831637950694, 50: 0.11140887547015, 100: 0.0813458356889133, 200: 0.0593365901653878, 500: 0.0387221159256424, 1000: 0.027578430100535997, 2000: 0.0193001796565433, 5000: 0.0124410052027886, 10000: 0.00892863905540303, 20000: 0.00633099254378114, 40000: 0.0044187010443287895, 72000: 0.00332688234611187},
        '0.99999': {4: 0.24945965232322498, 5: 0.199995525025673, 6: 0.24010356643629502, 7: 0.21266103831250602, 8: 0.225000835357532, 9: 0.21180033095039003, 10: 0.209682048785853, 15: 0.185743454151004, 20: 0.165568255916749, 30: 0.14155750962435099, 50: 0.113536607717411, 100: 0.0832963013755522, 200: 0.0607646310473911, 500: 0.039930259057650005, 1000: 0.0284430733108, 2000: 0.0196241518040617, 5000: 0.0129467396733128, 10000: 0.009138539330002129, 20000: 0.00656987109386762, 40000: 0.00450818604569179, 72000: 0.00339316094477355},
        '1': {4: 0.24974836247845, 5: 0.199999835639211, 6: 0.24467288361776798, 7: 0.21353618608817, 8: 0.23377291968768302, 9: 0.21537991431762502, 10: 0.221530282182963, 15: 0.19224056333056197, 20: 0.175834459522789, 30: 0.163833046059817, 50: 0.11788671686531199, 100: 0.0926780423096737, 200: 0.0705309107882395, 500: 0.0431448163617178, 1000: 0.0313640941982108, 2000: 0.0213081254074584, 5000: 0.014396063834027, 10000: 0.00952234579566773, 20000: 0.006858294480462271, 40000: 0.00513477467565583, 72000: 0.00376331697005859}}

    qDiptab_df = pd.DataFrame(qDiptab_dict)
    diptable = np.array(qDiptab_df)
    ps = np.array(qDiptab_df.columns).astype(float)
    Ns = np.array(qDiptab_df.index)

    if N >= Ns[-1]:
        dip = transform_dip_to_other_nbr_pts(dip, N, Ns[-1] - 0.1)
        N = Ns[-1] - 0.1

    iNlow = np.nonzero(Ns < N)[0][-1]
    qN = (N - Ns[iNlow]) / (Ns[iNlow + 1] - Ns[iNlow])
    dip_sqrtN = np.sqrt(N) * dip
    dip_interpol_sqrtN = (
        np.sqrt(Ns[iNlow]) * diptable[iNlow, :]
        + qN * (np.sqrt(Ns[iNlow + 1]) * diptable[iNlow + 1, :]
                - np.sqrt(Ns[iNlow]) * diptable[iNlow, :]))

    if not (dip_interpol_sqrtN < dip_sqrtN).any():
        return 1

    iplow = np.nonzero(dip_interpol_sqrtN < dip_sqrtN)[0][-1]
    if iplow == len(dip_interpol_sqrtN) - 1:
        return 0

    qp = (dip_sqrtN - dip_interpol_sqrtN[iplow]) / (dip_interpol_sqrtN[iplow + 1]
                                                    - dip_interpol_sqrtN[iplow])
    p_interpol = ps[iplow] + qp * (ps[iplow + 1] - ps[iplow])

    return 1 - p_interpol
python
def dip_pval_tabinterpol(dip, N): ''' dip - dip value computed from dip_from_cdf N - number of observations ''' # if qDiptab_df is None: # raise DataError("Tabulated p-values not available. See installation instructions.") if np.isnan(N) or N < 10: return np.nan qDiptab_dict = {'0': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0625, 9: 0.0555555555555556, 10: 0.05, 15: 0.0341378172277919, 20: 0.033718563622065004, 30: 0.0262674485075642, 50: 0.0218544781364545, 100: 0.0164852597438403, 200: 0.0111236388849688, 500: 0.007554885975761959, 1000: 0.00541658127872122, 2000: 0.0039043999745055702, 5000: 0.00245657785440433, 10000: 0.00174954269199566, 20000: 0.00119458814106091, 40000: 0.000852415648011777, 72000: 0.000644400053256997}, '0.01': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0625, 9: 0.0613018090298924, 10: 0.0610132555623269, 15: 0.0546284208048975, 20: 0.0474333740698401, 30: 0.0395871890405749, 50: 0.0314400501999916, 100: 0.022831985803043, 200: 0.0165017735429825, 500: 0.0106403461127515, 1000: 0.0076028674530018705, 2000: 0.0054166418179658294, 5000: 0.0034480928223332603, 10000: 0.00244595133885302, 20000: 0.00173435346896287, 40000: 0.00122883479310665, 72000: 0.000916872204484283}, '0.02': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0714285714285714, 8: 0.0656911994503283, 9: 0.0658615858179315, 10: 0.0651627333214016, 15: 0.0572191260231815, 20: 0.0490891387627092, 30: 0.0414574606741673, 50: 0.0329008160470834, 100: 0.0238917486442849, 200: 0.0172594157992489, 500: 0.0111255573208294, 1000: 0.00794987834644799, 2000: 0.0056617138625232296, 5000: 0.00360473943713036, 10000: 0.00255710802275612, 20000: 0.0018119443458468102, 40000: 0.0012846930445701802, 72000: 0.0009579329467655321}, '0.05': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0725717816250742, 8: 0.0738651136071762, 9: 0.0732651142535317, 10: 0.0718321619656165, 15: 0.0610087367689692, 20: 0.052719998201553, 30: 0.0444462614069956, 50: 0.0353023819040016, 100: 0.0256559537977579, 200: 0.0185259426032926, 500: 0.0119353655328931, 1000: 0.0085216518343594, 2000: 0.00607120971135229, 5000: 0.0038632654801084897, 10000: 0.00273990955227265, 20000: 0.00194259470485893, 40000: 0.0013761765052555301, 72000: 0.00102641863872347}, '0.1': {4: 0.125, 5: 0.1, 6: 0.0833333333333333, 7: 0.0817315478539489, 8: 0.0820045917762512, 9: 0.0803941629593475, 10: 0.077966212182459, 15: 0.0642657137330444, 20: 0.0567795509056742, 30: 0.0473998525042686, 50: 0.0377279973102482, 100: 0.0273987414570948, 200: 0.0197917612637521, 500: 0.0127411306411808, 1000: 0.00909775605533253, 2000: 0.0064762535755248, 5000: 0.00412089506752692, 10000: 0.0029225480567908, 20000: 0.00207173719623868, 40000: 0.0014675150200632301, 72000: 0.0010949515421800199}, '0.2': {4: 0.125, 5: 0.1, 6: 0.0924514470941933, 7: 0.0940590181922527, 8: 0.0922700601131892, 9: 0.0890432420913848, 10: 0.0852835359834564, 15: 0.0692234107989591, 20: 0.0620134674468181, 30: 0.0516677370374349, 50: 0.0410699984399582, 100: 0.0298109370830153, 200: 0.0215233745778454, 500: 0.0138524542751814, 1000: 0.00988924521014078, 2000: 0.00703573098590029, 5000: 0.00447640050137479, 10000: 0.00317374638422465, 20000: 0.00224993202086955, 40000: 0.00159376453672466, 72000: 0.00118904090369415}, '0.3': {4: 0.125, 5: 0.1, 6: 0.103913431059949, 7: 0.10324449080087102, 8: 0.0996737189599363, 9: 0.0950811420297928, 10: 0.0903204173707099, 15: 0.0745462114365167, 20: 0.0660163872069048, 30: 0.0551037519001622, 50: 0.0437704598622665, 100: 
0.0317771496530253, 200: 0.0229259769870428, 500: 0.0147536004288476, 1000: 0.0105309297090482, 2000: 0.007494212545892991, 5000: 0.00476555693102276, 10000: 0.00338072258533527, 20000: 0.00239520831473419, 40000: 0.00169668445506151, 72000: 0.00126575197699874}, '0.4': {4: 0.125, 5: 0.10872059357632902, 6: 0.113885220640212, 7: 0.110964599995697, 8: 0.10573353180273701, 9: 0.0999380897811046, 10: 0.0943334983745117, 15: 0.0792030878981762, 20: 0.0696506075066401, 30: 0.058265005347492994, 50: 0.0462925642671299, 100: 0.0336073821590387, 200: 0.024243848341112, 500: 0.0155963185751048, 1000: 0.0111322726797384, 2000: 0.007920878896017329, 5000: 0.005037040297500721, 10000: 0.0035724387653598205, 20000: 0.00253036792824665, 40000: 0.0017925341833790601, 72000: 0.00133750966361506}, '0.5': {4: 0.125, 5: 0.12156379802641401, 6: 0.123071187137781, 7: 0.11780784650433501, 8: 0.11103512984770501, 9: 0.10415356007586801, 10: 0.0977817630384725, 15: 0.083621033469191, 20: 0.0733437740592714, 30: 0.0614510857304343, 50: 0.048851155289608, 100: 0.0354621760592113, 200: 0.025584358256487003, 500: 0.0164519238025286, 1000: 0.0117439009052552, 2000: 0.008355737247680059, 5000: 0.0053123924740821294, 10000: 0.00376734715752209, 20000: 0.00266863168718114, 40000: 0.00189061261635977, 72000: 0.00141049709228472}, '0.6': {4: 0.125, 5: 0.134318918697053, 6: 0.13186973390253, 7: 0.124216086833531, 8: 0.11592005574998801, 9: 0.10800780236193198, 10: 0.102180866696628, 15: 0.0881198482202905, 20: 0.0776460662880254, 30: 0.0649164408053978, 50: 0.0516145897865757, 100: 0.0374805844550272, 200: 0.0270252129816288, 500: 0.017383057902553, 1000: 0.012405033293814, 2000: 0.00882439333812351, 5000: 0.00560929919359959, 10000: 0.00397885007249132, 20000: 0.0028181999035216, 40000: 0.00199645471886179, 72000: 0.00148936709298802}, '0.7': {4: 0.13255954878268902, 5: 0.14729879897625198, 6: 0.140564796497941, 7: 0.130409013968317, 8: 0.120561479262465, 9: 0.112512617124951, 10: 0.10996094814295099, 15: 0.093124666680253, 20: 0.0824558407118372, 30: 0.0689178762425442, 50: 0.0548121932066019, 100: 0.0398046179116599, 200: 0.0286920262150517, 500: 0.0184503949887735, 1000: 0.0131684179320803, 2000: 0.009367858207170609, 5000: 0.00595352728377949, 10000: 0.00422430013176233, 20000: 0.00299137548142077, 40000: 0.00211929748381704, 72000: 0.00158027541945626}, '0.8': {4: 0.15749736904023498, 5: 0.161085025702604, 6: 0.14941924112913002, 7: 0.136639642123068, 8: 0.125558759034845, 9: 0.12291503348081699, 10: 0.11884476721158699, 15: 0.0996694393390689, 20: 0.08834462700173701, 30: 0.0739249074078291, 50: 0.0588230482851366, 100: 0.0427283846799166, 200: 0.0308006766341406, 500: 0.0198162679782071, 1000: 0.0141377942603047, 2000: 0.01005604603884, 5000: 0.00639092280563517, 10000: 0.00453437508148542, 20000: 0.00321024899920135, 40000: 0.0022745769870358102, 72000: 0.00169651643860074}, '0.9': {4: 0.18740187880755899, 5: 0.176811998476076, 6: 0.159137064572627, 7: 0.144240669035124, 8: 0.141841067033899, 9: 0.136412639387084, 10: 0.130462149644819, 15: 0.11008749690090598, 20: 0.0972346018122903, 30: 0.0814791379390127, 50: 0.0649136324046767, 100: 0.047152783315718, 200: 0.0339967814293504, 500: 0.0218781313182203, 1000: 0.0156148055023058, 2000: 0.0111019116837591, 5000: 0.00705566126234625, 10000: 0.00500178808402368, 20000: 0.00354362220314155, 40000: 0.00250999080890397, 72000: 0.0018730618472582602}, '0.95': {4: 0.20726978858735998, 5: 0.18639179602794398, 6: 0.164769608513302, 7: 0.159903395678336, 8: 0.153978303998561, 
9: 0.14660378495401902, 10: 0.139611395137099, 15: 0.118760769203664, 20: 0.105130218270636, 30: 0.0881689143126666, 50: 0.0702737877191269, 100: 0.0511279442868827, 200: 0.0368418413878307, 500: 0.0237294742633411, 1000: 0.0169343970067564, 2000: 0.0120380990328341, 5000: 0.0076506368153935, 10000: 0.00542372242836395, 20000: 0.00384330190244679, 40000: 0.00272375073486223, 72000: 0.00203178401610555}, '0.98': {4: 0.22375580462922195, 5: 0.19361253363045, 6: 0.17917654739278197, 7: 0.17519655327122302, 8: 0.16597856724751, 9: 0.157084065653166, 10: 0.150961728882481, 15: 0.128890475210055, 20: 0.11430970428125302, 30: 0.0960564383013644, 50: 0.0767095886079179, 100: 0.0558022052195208, 200: 0.0402729850316397, 500: 0.025919578977657003, 1000: 0.018513067368104, 2000: 0.0131721010552576, 5000: 0.00836821687047215, 10000: 0.00592656681022859, 20000: 0.00420258799378253, 40000: 0.00298072958568387, 72000: 0.00222356097506054}, '0.99': {4: 0.231796258864192, 5: 0.19650913979884502, 6: 0.191862827995563, 7: 0.184118659121501, 8: 0.172988528276759, 9: 0.164164643657217, 10: 0.159684158858235, 15: 0.13598356863636, 20: 0.120624043335821, 30: 0.101478558893837, 50: 0.0811998415355918, 100: 0.059024132304226, 200: 0.0426864799777448, 500: 0.0274518022761997, 1000: 0.0196080260483234, 2000: 0.0139655122281969, 5000: 0.00886357892854914, 10000: 0.00628034732880374, 20000: 0.00445774902155711, 40000: 0.00315942194040388, 72000: 0.00235782814777627}, '0.995': {4: 0.23726374382677898, 5: 0.198159967287576, 6: 0.20210197104296804, 7: 0.19101439617430602, 8: 0.179010413496374, 9: 0.172821674582338, 10: 0.16719524735674, 15: 0.14245248368127697, 20: 0.126552378036739, 30: 0.10650487144103, 50: 0.0852854646662134, 100: 0.0620425065165146, 200: 0.044958959158761, 500: 0.0288986369564301, 1000: 0.0206489568587364, 2000: 0.0146889122204488, 5000: 0.00934162787186159, 10000: 0.00661030641550873, 20000: 0.00469461513212743, 40000: 0.0033273652798148, 72000: 0.00248343580127067}, '0.998': {4: 0.241992892688593, 5: 0.19924427936243302, 6: 0.213015781111186, 7: 0.198216795232182, 8: 0.186504388711178, 9: 0.182555283567818, 10: 0.175419540856082, 15: 0.15017281653074202, 20: 0.13360135382395, 30: 0.112724636524262, 50: 0.0904847827490294, 100: 0.0658016011466099, 200: 0.0477643873749449, 500: 0.0306813505050163, 1000: 0.0219285176765082, 2000: 0.0156076779647454, 5000: 0.009932186363240291, 10000: 0.00702254699967648, 20000: 0.004994160691291679, 40000: 0.00353988965698579, 72000: 0.00264210826339498}, '0.999': {4: 0.244369839049632, 5: 0.199617527406166, 6: 0.219518627282415, 7: 0.20234101074826102, 8: 0.19448404115794, 9: 0.188658833121906, 10: 0.180611195797351, 15: 0.15545613369632802, 20: 0.138569903791767, 30: 0.117164140184417, 50: 0.0940930106666244, 100: 0.0684479731118028, 200: 0.0497198001867437, 500: 0.0320170996823189, 1000: 0.0228689168972669, 2000: 0.0162685615996248, 5000: 0.0103498795291629, 10000: 0.0073182262815645795, 20000: 0.00520917757743218, 40000: 0.00369400045486625, 72000: 0.0027524322157581}, '0.9995': {4: 0.245966625504691, 5: 0.19980094149902802, 6: 0.22433904739444602, 7: 0.205377566346832, 8: 0.200864297005026, 9: 0.19408912076824603, 10: 0.18528641605039603, 15: 0.160896499106958, 20: 0.14336916123968, 30: 0.12142585990898701, 50: 0.0974904344916743, 100: 0.0709169443994193, 200: 0.0516114611801451, 500: 0.0332452747332959, 1000: 0.023738710122235003, 2000: 0.0168874937789415, 5000: 0.0107780907076862, 10000: 0.0076065423418208, 20000: 0.005403962359243721, 40000: 
0.00383345715372182, 72000: 0.0028608570740143}, '0.9998': {4: 0.24743959723326198, 5: 0.19991708183427104, 6: 0.22944933215424101, 7: 0.208306562526874, 8: 0.20884999705022897, 9: 0.19915700809389003, 10: 0.19120308390504398, 15: 0.16697940794624802, 20: 0.148940116394883, 30: 0.126733051889401, 50: 0.10228420428399698, 100: 0.0741183486081263, 200: 0.0540543978864652, 500: 0.0348335698576168, 1000: 0.0248334158891432, 2000: 0.0176505093388153, 5000: 0.0113184316868283, 10000: 0.00795640367207482, 20000: 0.00564540201704594, 40000: 0.0040079346963469605, 72000: 0.00298695044508003}, '0.9999': {4: 0.24823065965663801, 5: 0.19995902909307503, 6: 0.232714530449602, 7: 0.209866047852379, 8: 0.212556040406219, 9: 0.20288159843655804, 10: 0.19580515933918397, 15: 0.17111793515551002, 20: 0.152832538183622, 30: 0.131198578897542, 50: 0.104680624334611, 100: 0.0762579402903838, 200: 0.0558704526182638, 500: 0.0359832389317461, 1000: 0.0256126573433596, 2000: 0.0181944265400504, 5000: 0.0117329446468571, 10000: 0.0082270524584354, 20000: 0.00580460792299214, 40000: 0.00414892737222885, 72000: 0.00309340092038059}, '0.99995': {4: 0.248754269146416, 5: 0.19997839537608197, 6: 0.236548128358969, 7: 0.21096757693345103, 8: 0.21714917413729898, 9: 0.205979795735129, 10: 0.20029398089673, 15: 0.17590050570443203, 20: 0.15601016361897102, 30: 0.133691739483444, 50: 0.107496694235039, 100: 0.0785735967934979, 200: 0.0573877056330228, 500: 0.0369051995840645, 1000: 0.0265491336936829, 2000: 0.0186226037818523, 5000: 0.0119995948968375, 10000: 0.00852240989786251, 20000: 0.00599774739593151, 40000: 0.0042839159079761, 72000: 0.00319932767198801}, '0.99998': {4: 0.24930203997425898, 5: 0.199993151405815, 6: 0.2390887911995, 7: 0.212233348558702, 8: 0.22170007640450304, 9: 0.21054115498898, 10: 0.20565108964621898, 15: 0.18185667601316602, 20: 0.16131922583934502, 30: 0.137831637950694, 50: 0.11140887547015, 100: 0.0813458356889133, 200: 0.0593365901653878, 500: 0.0387221159256424, 1000: 0.027578430100535997, 2000: 0.0193001796565433, 5000: 0.0124410052027886, 10000: 0.00892863905540303, 20000: 0.00633099254378114, 40000: 0.0044187010443287895, 72000: 0.00332688234611187}, '0.99999': {4: 0.24945965232322498, 5: 0.199995525025673, 6: 0.24010356643629502, 7: 0.21266103831250602, 8: 0.225000835357532, 9: 0.21180033095039003, 10: 0.209682048785853, 15: 0.185743454151004, 20: 0.165568255916749, 30: 0.14155750962435099, 50: 0.113536607717411, 100: 0.0832963013755522, 200: 0.0607646310473911, 500: 0.039930259057650005, 1000: 0.0284430733108, 2000: 0.0196241518040617, 5000: 0.0129467396733128, 10000: 0.009138539330002129, 20000: 0.00656987109386762, 40000: 0.00450818604569179, 72000: 0.00339316094477355}, '1': {4: 0.24974836247845, 5: 0.199999835639211, 6: 0.24467288361776798, 7: 0.21353618608817, 8: 0.23377291968768302, 9: 0.21537991431762502, 10: 0.221530282182963, 15: 0.19224056333056197, 20: 0.175834459522789, 30: 0.163833046059817, 50: 0.11788671686531199, 100: 0.0926780423096737, 200: 0.0705309107882395, 500: 0.0431448163617178, 1000: 0.0313640941982108, 2000: 0.0213081254074584, 5000: 0.014396063834027, 10000: 0.00952234579566773, 20000: 0.006858294480462271, 40000: 0.00513477467565583, 72000: 0.00376331697005859}} qDiptab_df = pd.DataFrame(qDiptab_dict) diptable = np.array(qDiptab_df) ps = np.array(qDiptab_df.columns).astype(float) Ns = np.array(qDiptab_df.index) if N >= Ns[-1]: dip = transform_dip_to_other_nbr_pts(dip, N, Ns[-1]-0.1) N = Ns[-1]-0.1 iNlow = np.nonzero(Ns < N)[0][-1] qN = 
(N-Ns[iNlow])/(Ns[iNlow+1]-Ns[iNlow]) dip_sqrtN = np.sqrt(N)*dip dip_interpol_sqrtN = ( np.sqrt(Ns[iNlow])*diptable[iNlow, :] + qN*( np.sqrt(Ns[iNlow+1])*diptable[iNlow+1, :]-np.sqrt(Ns[iNlow])*diptable[iNlow, :])) if not (dip_interpol_sqrtN < dip_sqrtN).any(): return 1 iplow = np.nonzero(dip_interpol_sqrtN < dip_sqrtN)[0][-1] if iplow == len(dip_interpol_sqrtN) - 1: return 0 qp = (dip_sqrtN-dip_interpol_sqrtN[iplow])/(dip_interpol_sqrtN[iplow+1]-dip_interpol_sqrtN[iplow]) p_interpol = ps[iplow] + qp*(ps[iplow+1]-ps[iplow]) return 1 - p_interpol
dip - dip value computed from dip_from_cdf
N - number of observations
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/modality.py#L127-L713
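A minimal usage sketch for dip_pval_tabinterpol. The `modality` import path and the signature of the dip_from_cdf helper are assumptions (dip_from_cdf is referenced by the docstring but not shown here; it is assumed to take EDF coordinates like dip_and_closest_unimodal_from_cdf below):

import numpy as np
from modality import dip_from_cdf, dip_pval_tabinterpol  # assumed import path

# Bimodal sample: the dip statistic should be large, so the interpolated
# p-value for the unimodality hypothesis should be small.
rng = np.random.default_rng(0)
data = np.sort(np.concatenate([rng.normal(-2, 1, 500), rng.normal(2, 1, 500)]))
yF = np.arange(1, len(data) + 1) / len(data)  # EDF heights
dip = dip_from_cdf(data, yF)                  # assumed signature (xF, yF)
pval = dip_pval_tabinterpol(dip, N=len(data))
print(dip, pval)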
tompollard/tableone
modality.py
dip_and_closest_unimodal_from_cdf
def dip_and_closest_unimodal_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12): ''' Dip computed as distance between empirical distribution function (EDF) and cumulative distribution function for the unimodal distribution with smallest such distance. The optimal unimodal distribution is found by the algorithm presented in Hartigan (1985): Computation of the dip statistic to test for unimodaliy. Applied Statistics, vol. 34, no. 3 If the plotting option is enabled the optimal unimodal distribution function is plotted along with (xF, yF-dip) and (xF, yF+dip) xF - x-coordinates for EDF yF - y-coordinates for EDF ''' ## TODO! Preprocess xF and yF so that yF increasing and xF does ## not have more than two copies of each x-value. if (xF[1:]-xF[:-1] < -eps).any(): raise ValueError('Need sorted x-values to compute dip') if (yF[1:]-yF[:-1] < -eps).any(): raise ValueError('Need sorted y-values to compute dip') # if plotting: # Nplot = 5 # bfig = plt.figure(figsize=(12, 3)) # i = 1 # plot index D = 0 # lower bound for dip*2 # [L, U] is interval where we still need to find unimodal function, # the modal interval L = 0 U = len(xF) - 1 # iGfin are the indices of xF where the optimal unimodal distribution is greatest # convex minorant to (xF, yF+dip) # iHfin are the indices of xF where the optimal unimodal distribution is least # concave majorant to (xF, yF-dip) iGfin = L iHfin = U while 1: iGG = greatest_convex_minorant_sorted(xF[L:(U+1)], yF[L:(U+1)]) iHH = least_concave_majorant_sorted(xF[L:(U+1)], yF[L:(U+1)]) iG = np.arange(L, U+1)[iGG] iH = np.arange(L, U+1)[iHH] # Interpolate. First and last point are in both and does not need # interpolation. Might cause trouble if included due to possiblity # of infinity slope at beginning or end of interval. if iG[0] != iH[0] or iG[-1] != iH[-1]: raise ValueError('Convex minorant and concave majorant should start and end at same points.') hipl = np.interp(xF[iG[1:-1]], xF[iH], yF[iH]) gipl = np.interp(xF[iH[1:-1]], xF[iG], yF[iG]) hipl = np.hstack([yF[iH[0]], hipl, yF[iH[-1]]]) gipl = np.hstack([yF[iG[0]], gipl, yF[iG[-1]]]) #hipl = lin_interpol_sorted(xF[iG], xF[iH], yF[iH]) #gipl = lin_interpol_sorted(xF[iH], xF[iG], yF[iG]) # Find largest difference between GCM and LCM. gdiff = hipl - yF[iG] hdiff = yF[iH] - gipl imaxdiffg = np.argmax(gdiff) imaxdiffh = np.argmax(hdiff) d = max(gdiff[imaxdiffg], hdiff[imaxdiffh]) # # Plot current GCM and LCM. # if plotting: # if i > Nplot: # bfig = plt.figure(figsize=(12, 3)) # i = 1 # bax = bfig.add_subplot(1, Nplot, i) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-d/2, color='black') # bax.plot(xF, yF+d/2, color='black') # bax.plot(xF[iG], yF[iG]+d/2, color='blue') # bax.plot(xF[iH], yF[iH]-d/2, color='blue') # if d <= D: # if verbose: # print("Difference in modal interval smaller than current dip") # break # Find new modal interval so that largest difference is at endpoint # and set d to largest distance between current GCM and LCM. if gdiff[imaxdiffg] > hdiff[imaxdiffh]: L0 = iG[imaxdiffg] U0 = iH[iH >= L0][0] else: U0 = iH[imaxdiffh] L0 = iG[iG <= U0][-1] # Add points outside the modal interval to the final GCM and LCM. iGfin = np.hstack([iGfin, iG[(iG <= L0)*(iG > L)]]) iHfin = np.hstack([iH[(iH >= U0)*(iH < U)], iHfin]) # # Plot new modal interval # if plotting: # ymin, ymax = bax.get_ylim() # bax.axvline(xF[L0], ymin, ymax, color='orange') # bax.axvline(xF[U0], ymin, ymax, color='red') # bax.set_xlim(xF[L]-.1*(xF[U]-xF[L]), xF[U]+.1*(xF[U]-xF[L])) # Compute new lower bound for dip*2 # i.e. 
largest difference outside modal interval gipl = np.interp(xF[L:(L0+1)], xF[iG], yF[iG]) D = max(D, np.amax(yF[L:(L0+1)] - gipl)) hipl = np.interp(xF[U0:(U+1)], xF[iH], yF[iH]) D = max(D, np.amax(hipl - yF[U0:(U+1)])) if xF[U0]-xF[L0] < eps: if verbose: print("Modal interval zero length") break # if plotting: # mxpt = np.argmax(yF[L:(L0+1)] - gipl) # bax.plot([xF[L:][mxpt], xF[L:][mxpt]], [yF[L:][mxpt]+d/2, # gipl[mxpt]+d/2], '+', color='red') # mxpt = np.argmax(hipl - yF[U0:(U+1)]) # bax.plot([xF[U0:][mxpt], xF[U0:][mxpt]], [yF[U0:][mxpt]-d/2, # hipl[mxpt]-d/2], '+', color='red') # i += 1 # Change modal interval L = L0 U = U0 if d <= D: if verbose: print("Difference in modal interval smaller than new dip") break # if plotting: # # Add modal interval to figure # bax.axvline(xF[L0], ymin, ymax, color='green', linestyle='dashed') # bax.axvline(xF[U0], ymin, ymax, color='green', linestyle='dashed') # ## Plot unimodal function (not distribution function) # bfig = plt.figure() # bax = bfig.add_subplot(1, 1, 1) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-D/2, color='black') # bax.plot(xF, yF+D/2, color='black') # Find string position in modal interval iM = np.arange(iGfin[-1], iHfin[0]+1) yM_lower = yF[iM]-D/2 yM_lower[0] = yF[iM[0]]+D/2 iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower) iM_concave = iM[iMM_concave] #bax.plot(xF[iM], yM_lower, color='orange') #bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red') lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave]) try: mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]] #bax.axvline(xF[mode], color='green', linestyle='dashed') except IndexError: iM_convex = np.zeros(0, dtype='i') else: after_mode = iM_concave > mode iM_concave = iM_concave[after_mode] iMM_concave = iMM_concave[after_mode] iM = iM[iM <= mode] iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])] # if plotting: # bax.plot(xF[np.hstack([iGfin, iM_convex, iM_concave, iHfin])], # np.hstack([yF[iGfin] + D/2, yF[iM_convex] + D/2, # yM_lower[iMM_concave], yF[iHfin] - D/2]), color='blue') # #bax.plot(xF[iM], yM_lower, color='orange') # ## Plot unimodal distribution function # bfig = plt.figure() # bax = bfig.add_subplot(1, 1, 1) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-D/2, color='black') # bax.plot(xF, yF+D/2, color='black') # Find string position in modal interval iM = np.arange(iGfin[-1], iHfin[0]+1) yM_lower = yF[iM]-D/2 yM_lower[0] = yF[iM[0]]+D/2 iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower) iM_concave = iM[iMM_concave] #bax.plot(xF[iM], yM_lower, color='orange') #bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red') lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave]) try: mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]] #bax.axvline(xF[mode], color='green', linestyle='dashed') except IndexError: iM_convex = np.zeros(0, dtype='i') else: after_mode = iM_concave > mode iM_concave = iM_concave[after_mode] iMM_concave = iMM_concave[after_mode] iM = iM[iM <= mode] iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])] # Closest unimodal curve xU = xF[np.hstack([iGfin[:-1], iM_convex, iM_concave, iHfin[1:]])] yU = np.hstack([yF[iGfin[:-1]] + D/2, yF[iM_convex] + D/2, yM_lower[iMM_concave], yF[iHfin[1:]] - D/2]) # Add points so unimodal curve goes from 0 to 1 k_start = (yU[1]-yU[0])/(xU[1]-xU[0]+1e-5) xU_start = xU[0] - yU[0]/(k_start+1e-5) k_end = (yU[-1]-yU[-2])/(xU[-1]-xU[-2]+1e-5) xU_end = xU[-1] + (1-yU[-1])/(k_end+1e-5) xU = np.hstack([xU_start, xU, xU_end]) yU = np.hstack([0, 
yU, 1]) # if plotting: # bax.plot(xU, yU, color='blue') # #bax.plot(xF[iM], yM_lower, color='orange') # plt.show() return D/2, (xU, yU)
python
def dip_and_closest_unimodal_from_cdf(xF, yF, plotting=False, verbose=False, eps=1e-12): ''' Dip computed as distance between empirical distribution function (EDF) and cumulative distribution function for the unimodal distribution with smallest such distance. The optimal unimodal distribution is found by the algorithm presented in Hartigan (1985): Computation of the dip statistic to test for unimodaliy. Applied Statistics, vol. 34, no. 3 If the plotting option is enabled the optimal unimodal distribution function is plotted along with (xF, yF-dip) and (xF, yF+dip) xF - x-coordinates for EDF yF - y-coordinates for EDF ''' ## TODO! Preprocess xF and yF so that yF increasing and xF does ## not have more than two copies of each x-value. if (xF[1:]-xF[:-1] < -eps).any(): raise ValueError('Need sorted x-values to compute dip') if (yF[1:]-yF[:-1] < -eps).any(): raise ValueError('Need sorted y-values to compute dip') # if plotting: # Nplot = 5 # bfig = plt.figure(figsize=(12, 3)) # i = 1 # plot index D = 0 # lower bound for dip*2 # [L, U] is interval where we still need to find unimodal function, # the modal interval L = 0 U = len(xF) - 1 # iGfin are the indices of xF where the optimal unimodal distribution is greatest # convex minorant to (xF, yF+dip) # iHfin are the indices of xF where the optimal unimodal distribution is least # concave majorant to (xF, yF-dip) iGfin = L iHfin = U while 1: iGG = greatest_convex_minorant_sorted(xF[L:(U+1)], yF[L:(U+1)]) iHH = least_concave_majorant_sorted(xF[L:(U+1)], yF[L:(U+1)]) iG = np.arange(L, U+1)[iGG] iH = np.arange(L, U+1)[iHH] # Interpolate. First and last point are in both and does not need # interpolation. Might cause trouble if included due to possiblity # of infinity slope at beginning or end of interval. if iG[0] != iH[0] or iG[-1] != iH[-1]: raise ValueError('Convex minorant and concave majorant should start and end at same points.') hipl = np.interp(xF[iG[1:-1]], xF[iH], yF[iH]) gipl = np.interp(xF[iH[1:-1]], xF[iG], yF[iG]) hipl = np.hstack([yF[iH[0]], hipl, yF[iH[-1]]]) gipl = np.hstack([yF[iG[0]], gipl, yF[iG[-1]]]) #hipl = lin_interpol_sorted(xF[iG], xF[iH], yF[iH]) #gipl = lin_interpol_sorted(xF[iH], xF[iG], yF[iG]) # Find largest difference between GCM and LCM. gdiff = hipl - yF[iG] hdiff = yF[iH] - gipl imaxdiffg = np.argmax(gdiff) imaxdiffh = np.argmax(hdiff) d = max(gdiff[imaxdiffg], hdiff[imaxdiffh]) # # Plot current GCM and LCM. # if plotting: # if i > Nplot: # bfig = plt.figure(figsize=(12, 3)) # i = 1 # bax = bfig.add_subplot(1, Nplot, i) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-d/2, color='black') # bax.plot(xF, yF+d/2, color='black') # bax.plot(xF[iG], yF[iG]+d/2, color='blue') # bax.plot(xF[iH], yF[iH]-d/2, color='blue') # if d <= D: # if verbose: # print("Difference in modal interval smaller than current dip") # break # Find new modal interval so that largest difference is at endpoint # and set d to largest distance between current GCM and LCM. if gdiff[imaxdiffg] > hdiff[imaxdiffh]: L0 = iG[imaxdiffg] U0 = iH[iH >= L0][0] else: U0 = iH[imaxdiffh] L0 = iG[iG <= U0][-1] # Add points outside the modal interval to the final GCM and LCM. iGfin = np.hstack([iGfin, iG[(iG <= L0)*(iG > L)]]) iHfin = np.hstack([iH[(iH >= U0)*(iH < U)], iHfin]) # # Plot new modal interval # if plotting: # ymin, ymax = bax.get_ylim() # bax.axvline(xF[L0], ymin, ymax, color='orange') # bax.axvline(xF[U0], ymin, ymax, color='red') # bax.set_xlim(xF[L]-.1*(xF[U]-xF[L]), xF[U]+.1*(xF[U]-xF[L])) # Compute new lower bound for dip*2 # i.e. 
largest difference outside modal interval gipl = np.interp(xF[L:(L0+1)], xF[iG], yF[iG]) D = max(D, np.amax(yF[L:(L0+1)] - gipl)) hipl = np.interp(xF[U0:(U+1)], xF[iH], yF[iH]) D = max(D, np.amax(hipl - yF[U0:(U+1)])) if xF[U0]-xF[L0] < eps: if verbose: print("Modal interval zero length") break # if plotting: # mxpt = np.argmax(yF[L:(L0+1)] - gipl) # bax.plot([xF[L:][mxpt], xF[L:][mxpt]], [yF[L:][mxpt]+d/2, # gipl[mxpt]+d/2], '+', color='red') # mxpt = np.argmax(hipl - yF[U0:(U+1)]) # bax.plot([xF[U0:][mxpt], xF[U0:][mxpt]], [yF[U0:][mxpt]-d/2, # hipl[mxpt]-d/2], '+', color='red') # i += 1 # Change modal interval L = L0 U = U0 if d <= D: if verbose: print("Difference in modal interval smaller than new dip") break # if plotting: # # Add modal interval to figure # bax.axvline(xF[L0], ymin, ymax, color='green', linestyle='dashed') # bax.axvline(xF[U0], ymin, ymax, color='green', linestyle='dashed') # ## Plot unimodal function (not distribution function) # bfig = plt.figure() # bax = bfig.add_subplot(1, 1, 1) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-D/2, color='black') # bax.plot(xF, yF+D/2, color='black') # Find string position in modal interval iM = np.arange(iGfin[-1], iHfin[0]+1) yM_lower = yF[iM]-D/2 yM_lower[0] = yF[iM[0]]+D/2 iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower) iM_concave = iM[iMM_concave] #bax.plot(xF[iM], yM_lower, color='orange') #bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red') lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave]) try: mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]] #bax.axvline(xF[mode], color='green', linestyle='dashed') except IndexError: iM_convex = np.zeros(0, dtype='i') else: after_mode = iM_concave > mode iM_concave = iM_concave[after_mode] iMM_concave = iMM_concave[after_mode] iM = iM[iM <= mode] iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])] # if plotting: # bax.plot(xF[np.hstack([iGfin, iM_convex, iM_concave, iHfin])], # np.hstack([yF[iGfin] + D/2, yF[iM_convex] + D/2, # yM_lower[iMM_concave], yF[iHfin] - D/2]), color='blue') # #bax.plot(xF[iM], yM_lower, color='orange') # ## Plot unimodal distribution function # bfig = plt.figure() # bax = bfig.add_subplot(1, 1, 1) # bax.plot(xF, yF, color='red') # bax.plot(xF, yF-D/2, color='black') # bax.plot(xF, yF+D/2, color='black') # Find string position in modal interval iM = np.arange(iGfin[-1], iHfin[0]+1) yM_lower = yF[iM]-D/2 yM_lower[0] = yF[iM[0]]+D/2 iMM_concave = least_concave_majorant_sorted(xF[iM], yM_lower) iM_concave = iM[iMM_concave] #bax.plot(xF[iM], yM_lower, color='orange') #bax.plot(xF[iM_concave], yM_lower[iMM_concave], color='red') lcm_ipl = np.interp(xF[iM], xF[iM_concave], yM_lower[iMM_concave]) try: mode = iM[np.nonzero(lcm_ipl > yF[iM]+D/2)[0][-1]] #bax.axvline(xF[mode], color='green', linestyle='dashed') except IndexError: iM_convex = np.zeros(0, dtype='i') else: after_mode = iM_concave > mode iM_concave = iM_concave[after_mode] iMM_concave = iMM_concave[after_mode] iM = iM[iM <= mode] iM_convex = iM[greatest_convex_minorant_sorted(xF[iM], yF[iM])] # Closest unimodal curve xU = xF[np.hstack([iGfin[:-1], iM_convex, iM_concave, iHfin[1:]])] yU = np.hstack([yF[iGfin[:-1]] + D/2, yF[iM_convex] + D/2, yM_lower[iMM_concave], yF[iHfin[1:]] - D/2]) # Add points so unimodal curve goes from 0 to 1 k_start = (yU[1]-yU[0])/(xU[1]-xU[0]+1e-5) xU_start = xU[0] - yU[0]/(k_start+1e-5) k_end = (yU[-1]-yU[-2])/(xU[-1]-xU[-2]+1e-5) xU_end = xU[-1] + (1-yU[-1])/(k_end+1e-5) xU = np.hstack([xU_start, xU, xU_end]) yU = np.hstack([0, 
yU, 1]) # if plotting: # bax.plot(xU, yU, color='blue') # #bax.plot(xF[iM], yM_lower, color='orange') # plt.show() return D/2, (xU, yU)
Dip computed as distance between empirical distribution function (EDF) and cumulative distribution function for the unimodal distribution with smallest such distance. The optimal unimodal distribution is found by the algorithm presented in Hartigan (1985): Computation of the dip statistic to test for unimodality. Applied Statistics, vol. 34, no. 3. If the plotting option is enabled the optimal unimodal distribution function is plotted along with (xF, yF-dip) and (xF, yF+dip).

xF - x-coordinates for EDF
yF - y-coordinates for EDF
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/modality.py#L719-L942
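A usage sketch for dip_and_closest_unimodal_from_cdf under the same assumed import path; the function returns the dip (half the maximal distance D) together with the closest unimodal CDF as a pair of coordinate arrays:

import numpy as np
from modality import dip_and_closest_unimodal_from_cdf  # assumed import path

rng = np.random.default_rng(1)
xF = np.sort(rng.normal(size=300))        # sorted x-coordinates of the EDF
yF = np.arange(1, len(xF) + 1) / len(xF)  # EDF heights
dip, (xU, yU) = dip_and_closest_unimodal_from_cdf(xF, yF)
print(dip)  # distance between the EDF and the closest unimodal CDF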
tompollard/tableone
modality.py
bandwidth_factor
def bandwidth_factor(nbr_data_pts, deriv_order=0):
    '''
        Scale factor for one-dimensional plug-in bandwidth selection.
    '''
    if deriv_order == 0:
        return (3.0*nbr_data_pts/4)**(-1.0/5)
    if deriv_order == 2:
        return (7.0*nbr_data_pts/4)**(-1.0/9)
    raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
python
def bandwidth_factor(nbr_data_pts, deriv_order=0):
    '''
        Scale factor for one-dimensional plug-in bandwidth selection.
    '''
    if deriv_order == 0:
        return (3.0*nbr_data_pts/4)**(-1.0/5)
    if deriv_order == 2:
        return (7.0*nbr_data_pts/4)**(-1.0/9)
    raise ValueError('Not implemented for derivative of order {}'.format(deriv_order))
Scale factor for one-dimensional plug-in bandwidth selection.
https://github.com/tompollard/tableone/blob/4a274d3d2f8d16b8eaa0bde030f3da29b876cee8/modality.py#L1011-L1021
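The factors implement the usual plug-in scalings (3n/4)**(-1/5) for density estimation and (7n/4)**(-1/9) for its second derivative; a quick check (import path assumed):

from modality import bandwidth_factor  # assumed import path

h0 = bandwidth_factor(1000)                 # 750 ** (-1/5)  ~= 0.266
h2 = bandwidth_factor(1000, deriv_order=2)  # 1750 ** (-1/9) ~= 0.436
# Any other deriv_order (e.g. 1) raises ValueError.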
Pylons/paginate
paginate/__init__.py
make_html_tag
def make_html_tag(tag, text=None, **params):
    """Create an HTML tag string.

    tag
        The HTML tag to use (e.g. 'a', 'span' or 'div')

    text
        The text to enclose between opening and closing tag.
        If no text is specified then only the opening tag is returned.

    Example::
        make_html_tag('a', text="Hello", href="/another/page")
        -> <a href="/another/page">Hello</a>

    To use reserved Python keywords like "class" as a parameter prepend it with
    an underscore. Instead of "class='green'" use "_class='green'".

    Warning: Quotes and apostrophes are not escaped."""
    params_string = ""

    # Parameters are passed. Turn the dict into a string like "a=1 b=2 c=3" string.
    for key, value in sorted(params.items()):
        # Strip off a leading underscore from the attribute's key to allow
        # attributes like '_class' to be used as a CSS class specification
        # instead of the reserved Python keyword 'class'.
        key = key.lstrip("_")
        params_string += u' {0}="{1}"'.format(key, value)

    # Create the tag string
    tag_string = u"<{0}{1}>".format(tag, params_string)

    # Add text and closing tag if required.
    if text:
        tag_string += u"{0}</{1}>".format(text, tag)

    return tag_string
python
def make_html_tag(tag, text=None, **params):
    """Create an HTML tag string.

    tag
        The HTML tag to use (e.g. 'a', 'span' or 'div')

    text
        The text to enclose between opening and closing tag.
        If no text is specified then only the opening tag is returned.

    Example::
        make_html_tag('a', text="Hello", href="/another/page")
        -> <a href="/another/page">Hello</a>

    To use reserved Python keywords like "class" as a parameter prepend it with
    an underscore. Instead of "class='green'" use "_class='green'".

    Warning: Quotes and apostrophes are not escaped."""
    params_string = ""

    # Parameters are passed. Turn the dict into a string like "a=1 b=2 c=3" string.
    for key, value in sorted(params.items()):
        # Strip off a leading underscore from the attribute's key to allow
        # attributes like '_class' to be used as a CSS class specification
        # instead of the reserved Python keyword 'class'.
        key = key.lstrip("_")
        params_string += u' {0}="{1}"'.format(key, value)

    # Create the tag string
    tag_string = u"<{0}{1}>".format(tag, params_string)

    # Add text and closing tag if required.
    if text:
        tag_string += u"{0}</{1}>".format(text, tag)

    return tag_string
Create an HTML tag string.

tag
    The HTML tag to use (e.g. 'a', 'span' or 'div')

text
    The text to enclose between opening and closing tag. If no text is specified then only the opening tag is returned.

Example::
    make_html_tag('a', text="Hello", href="/another/page")
    -> <a href="/another/page">Hello</a>

To use reserved Python keywords like "class" as a parameter prepend it with an underscore. Instead of "class='green'" use "_class='green'".

Warning: Quotes and apostrophes are not escaped.
https://github.com/Pylons/paginate/blob/07e6f62c00a731839ca2da32e6d6a37b31a13d4f/paginate/__init__.py#L828-L863
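Because params are emitted in sorted order of their original keys (so '_class' precedes 'href') before the leading underscore is stripped, the output is deterministic. A short check using the function defined above:

print(make_html_tag('a', text="Next", href="/page/2", _class="pager_link"))
# -> <a class="pager_link" href="/page/2">Next</a>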
Pylons/paginate
paginate/__init__.py
Page.pager
def pager( self, format="~2~", url=None, show_if_single_page=False, separator=" ", symbol_first="&lt;&lt;", symbol_last="&gt;&gt;", symbol_previous="&lt;", symbol_next="&gt;", link_attr=None, curpage_attr=None, dotdot_attr=None, link_tag=None, ): """ Return string with links to other pages (e.g. '1 .. 5 6 7 [8] 9 10 11 .. 50'). format: Format string that defines how the pager is rendered. The string can contain the following $-tokens that are substituted by the string.Template module: - $first_page: number of first reachable page - $last_page: number of last reachable page - $page: number of currently selected page - $page_count: number of reachable pages - $items_per_page: maximal number of items per page - $first_item: index of first item on the current page - $last_item: index of last item on the current page - $item_count: total number of items - $link_first: link to first page (unless this is first page) - $link_last: link to last page (unless this is last page) - $link_previous: link to previous page (unless this is first page) - $link_next: link to next page (unless this is last page) To render a range of pages the token '~3~' can be used. The number sets the radius of pages around the current page. Example for a range with radius 3: '1 .. 5 6 7 [8] 9 10 11 .. 50' Default: '~2~' url The URL that page links will point to. Make sure it contains the string $page which will be replaced by the actual page number. Must be given unless a url_maker is specified to __init__, in which case this parameter is ignored. symbol_first String to be displayed as the text for the $link_first link above. Default: '&lt;&lt;' (<<) symbol_last String to be displayed as the text for the $link_last link above. Default: '&gt;&gt;' (>>) symbol_previous String to be displayed as the text for the $link_previous link above. Default: '&lt;' (<) symbol_next String to be displayed as the text for the $link_next link above. Default: '&gt;' (>) separator: String that is used to separate page links/numbers in the above range of pages. Default: ' ' show_if_single_page: if True the navigator will be shown even if there is only one page. Default: False link_attr (optional) A dictionary of attributes that get added to A-HREF links pointing to other pages. Can be used to define a CSS style or class to customize the look of links. Example: { 'style':'border: 1px solid green' } Example: { 'class':'pager_link' } curpage_attr (optional) A dictionary of attributes that get added to the current page number in the pager (which is obviously not a link). If this dictionary is not empty then the elements will be wrapped in a SPAN tag with the given attributes. Example: { 'style':'border: 3px solid blue' } Example: { 'class':'pager_curpage' } dotdot_attr (optional) A dictionary of attributes that get added to the '..' string in the pager (which is obviously not a link). If this dictionary is not empty then the elements will be wrapped in a SPAN tag with the given attributes. Example: { 'style':'color: #808080' } Example: { 'class':'pager_dotdot' } link_tag (optional) A callable that accepts single argument `page` (page link information) and generates string with html that represents the link for specific page. Page objects are supplied from `link_map()` so the keys are the same. 
""" link_attr = link_attr or {} curpage_attr = curpage_attr or {} dotdot_attr = dotdot_attr or {} self.curpage_attr = curpage_attr self.separator = separator self.link_attr = link_attr self.dotdot_attr = dotdot_attr self.url = url self.link_tag = link_tag or self.default_link_tag # Don't show navigator if there is no more than one page if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page): return "" regex_res = re.search(r"~(\d+)~", format) if regex_res: radius = regex_res.group(1) else: radius = 2 radius = int(radius) self.radius = radius link_map = self.link_map( format=format, url=url, show_if_single_page=show_if_single_page, separator=separator, symbol_first=symbol_first, symbol_last=symbol_last, symbol_previous=symbol_previous, symbol_next=symbol_next, link_attr=link_attr, curpage_attr=curpage_attr, dotdot_attr=dotdot_attr, ) links_markup = self._range(link_map, radius) # Replace ~...~ in token format by range of pages result = re.sub(r"~(\d+)~", links_markup, format) link_first = ( self.page > self.first_page and self.link_tag(link_map["first_page"]) or "" ) link_last = ( self.page < self.last_page and self.link_tag(link_map["last_page"]) or "" ) link_previous = ( self.previous_page and self.link_tag(link_map["previous_page"]) or "" ) link_next = self.next_page and self.link_tag(link_map["next_page"]) or "" # Interpolate '$' variables result = Template(result).safe_substitute( { "first_page": self.first_page, "last_page": self.last_page, "page": self.page, "page_count": self.page_count, "items_per_page": self.items_per_page, "first_item": self.first_item, "last_item": self.last_item, "item_count": self.item_count, "link_first": link_first, "link_last": link_last, "link_previous": link_previous, "link_next": link_next, } ) return result
python
def pager( self, format="~2~", url=None, show_if_single_page=False, separator=" ", symbol_first="&lt;&lt;", symbol_last="&gt;&gt;", symbol_previous="&lt;", symbol_next="&gt;", link_attr=None, curpage_attr=None, dotdot_attr=None, link_tag=None, ): """ Return string with links to other pages (e.g. '1 .. 5 6 7 [8] 9 10 11 .. 50'). format: Format string that defines how the pager is rendered. The string can contain the following $-tokens that are substituted by the string.Template module: - $first_page: number of first reachable page - $last_page: number of last reachable page - $page: number of currently selected page - $page_count: number of reachable pages - $items_per_page: maximal number of items per page - $first_item: index of first item on the current page - $last_item: index of last item on the current page - $item_count: total number of items - $link_first: link to first page (unless this is first page) - $link_last: link to last page (unless this is last page) - $link_previous: link to previous page (unless this is first page) - $link_next: link to next page (unless this is last page) To render a range of pages the token '~3~' can be used. The number sets the radius of pages around the current page. Example for a range with radius 3: '1 .. 5 6 7 [8] 9 10 11 .. 50' Default: '~2~' url The URL that page links will point to. Make sure it contains the string $page which will be replaced by the actual page number. Must be given unless a url_maker is specified to __init__, in which case this parameter is ignored. symbol_first String to be displayed as the text for the $link_first link above. Default: '&lt;&lt;' (<<) symbol_last String to be displayed as the text for the $link_last link above. Default: '&gt;&gt;' (>>) symbol_previous String to be displayed as the text for the $link_previous link above. Default: '&lt;' (<) symbol_next String to be displayed as the text for the $link_next link above. Default: '&gt;' (>) separator: String that is used to separate page links/numbers in the above range of pages. Default: ' ' show_if_single_page: if True the navigator will be shown even if there is only one page. Default: False link_attr (optional) A dictionary of attributes that get added to A-HREF links pointing to other pages. Can be used to define a CSS style or class to customize the look of links. Example: { 'style':'border: 1px solid green' } Example: { 'class':'pager_link' } curpage_attr (optional) A dictionary of attributes that get added to the current page number in the pager (which is obviously not a link). If this dictionary is not empty then the elements will be wrapped in a SPAN tag with the given attributes. Example: { 'style':'border: 3px solid blue' } Example: { 'class':'pager_curpage' } dotdot_attr (optional) A dictionary of attributes that get added to the '..' string in the pager (which is obviously not a link). If this dictionary is not empty then the elements will be wrapped in a SPAN tag with the given attributes. Example: { 'style':'color: #808080' } Example: { 'class':'pager_dotdot' } link_tag (optional) A callable that accepts single argument `page` (page link information) and generates string with html that represents the link for specific page. Page objects are supplied from `link_map()` so the keys are the same. 
""" link_attr = link_attr or {} curpage_attr = curpage_attr or {} dotdot_attr = dotdot_attr or {} self.curpage_attr = curpage_attr self.separator = separator self.link_attr = link_attr self.dotdot_attr = dotdot_attr self.url = url self.link_tag = link_tag or self.default_link_tag # Don't show navigator if there is no more than one page if self.page_count == 0 or (self.page_count == 1 and not show_if_single_page): return "" regex_res = re.search(r"~(\d+)~", format) if regex_res: radius = regex_res.group(1) else: radius = 2 radius = int(radius) self.radius = radius link_map = self.link_map( format=format, url=url, show_if_single_page=show_if_single_page, separator=separator, symbol_first=symbol_first, symbol_last=symbol_last, symbol_previous=symbol_previous, symbol_next=symbol_next, link_attr=link_attr, curpage_attr=curpage_attr, dotdot_attr=dotdot_attr, ) links_markup = self._range(link_map, radius) # Replace ~...~ in token format by range of pages result = re.sub(r"~(\d+)~", links_markup, format) link_first = ( self.page > self.first_page and self.link_tag(link_map["first_page"]) or "" ) link_last = ( self.page < self.last_page and self.link_tag(link_map["last_page"]) or "" ) link_previous = ( self.previous_page and self.link_tag(link_map["previous_page"]) or "" ) link_next = self.next_page and self.link_tag(link_map["next_page"]) or "" # Interpolate '$' variables result = Template(result).safe_substitute( { "first_page": self.first_page, "last_page": self.last_page, "page": self.page, "page_count": self.page_count, "items_per_page": self.items_per_page, "first_item": self.first_item, "last_item": self.last_item, "item_count": self.item_count, "link_first": link_first, "link_last": link_last, "link_previous": link_previous, "link_next": link_next, } ) return result
Return string with links to other pages (e.g. '1 .. 5 6 7 [8] 9 10 11 .. 50').

format:
    Format string that defines how the pager is rendered. The string can contain the following $-tokens that are substituted by the string.Template module:
    - $first_page: number of first reachable page
    - $last_page: number of last reachable page
    - $page: number of currently selected page
    - $page_count: number of reachable pages
    - $items_per_page: maximal number of items per page
    - $first_item: index of first item on the current page
    - $last_item: index of last item on the current page
    - $item_count: total number of items
    - $link_first: link to first page (unless this is first page)
    - $link_last: link to last page (unless this is last page)
    - $link_previous: link to previous page (unless this is first page)
    - $link_next: link to next page (unless this is last page)
    To render a range of pages the token '~3~' can be used. The number sets the radius of pages around the current page. Example for a range with radius 3: '1 .. 5 6 7 [8] 9 10 11 .. 50'
    Default: '~2~'

url
    The URL that page links will point to. Make sure it contains the string $page which will be replaced by the actual page number. Must be given unless a url_maker is specified to __init__, in which case this parameter is ignored.

symbol_first
    String to be displayed as the text for the $link_first link above. Default: '&lt;&lt;' (<<)

symbol_last
    String to be displayed as the text for the $link_last link above. Default: '&gt;&gt;' (>>)

symbol_previous
    String to be displayed as the text for the $link_previous link above. Default: '&lt;' (<)

symbol_next
    String to be displayed as the text for the $link_next link above. Default: '&gt;' (>)

separator:
    String that is used to separate page links/numbers in the above range of pages. Default: ' '

show_if_single_page:
    if True the navigator will be shown even if there is only one page. Default: False

link_attr (optional)
    A dictionary of attributes that get added to A-HREF links pointing to other pages. Can be used to define a CSS style or class to customize the look of links. Example: { 'style':'border: 1px solid green' } Example: { 'class':'pager_link' }

curpage_attr (optional)
    A dictionary of attributes that get added to the current page number in the pager (which is obviously not a link). If this dictionary is not empty then the elements will be wrapped in a SPAN tag with the given attributes. Example: { 'style':'border: 3px solid blue' } Example: { 'class':'pager_curpage' }

dotdot_attr (optional)
    A dictionary of attributes that get added to the '..' string in the pager (which is obviously not a link). If this dictionary is not empty then the elements will be wrapped in a SPAN tag with the given attributes. Example: { 'style':'color: #808080' } Example: { 'class':'pager_dotdot' }

link_tag (optional)
    A callable that accepts single argument `page` (page link information) and generates string with html that represents the link for specific page. Page objects are supplied from `link_map()` so the keys are the same.
https://github.com/Pylons/paginate/blob/07e6f62c00a731839ca2da32e6d6a37b31a13d4f/paginate/__init__.py#L336-L513
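A short pager() usage sketch; Page is constructed with a url_maker as the docstring describes, and the format string mixes $-tokens with the ~2~ range token (the exact markup of the rendered links is illustrative):

from paginate import Page

page = Page(range(100), page=5, items_per_page=10,
            url_maker=lambda p: "/items?page=%d" % p)
html = page.pager("Page $page of $page_count: $link_previous ~2~ $link_next")
print(html)
# e.g. 'Page 5 of 10: <a href="/items?page=4">&lt;</a> 1 .. 3 4 5 6 7 .. 10 <a href="/items?page=6">&gt;</a>'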
Pylons/paginate
paginate/__init__.py
Page.link_map
def link_map( self, format="~2~", url=None, show_if_single_page=False, separator=" ", symbol_first="&lt;&lt;", symbol_last="&gt;&gt;", symbol_previous="&lt;", symbol_next="&gt;", link_attr=None, curpage_attr=None, dotdot_attr=None, ): """ Return map with links to other pages if default pager() function is not suitable solution. format: Format string that defines how the pager would be normally rendered rendered. Uses same arguments as pager() method, but returns a simple dictionary in form of: {'current_page': {'attrs': {}, 'href': 'http://example.org/foo/page=1', 'value': 1}, 'first_page': {'attrs': {}, 'href': 'http://example.org/foo/page=1', 'type': 'first_page', 'value': 1}, 'last_page': {'attrs': {}, 'href': 'http://example.org/foo/page=8', 'type': 'last_page', 'value': 8}, 'next_page': {'attrs': {}, 'href': 'HREF', 'type': 'next_page', 'value': 2}, 'previous_page': None, 'range_pages': [{'attrs': {}, 'href': 'http://example.org/foo/page=1', 'type': 'current_page', 'value': 1}, .... {'attrs': {}, 'href': '', 'type': 'span', 'value': '..'}]} The string can contain the following $-tokens that are substituted by the string.Template module: - $first_page: number of first reachable page - $last_page: number of last reachable page - $page: number of currently selected page - $page_count: number of reachable pages - $items_per_page: maximal number of items per page - $first_item: index of first item on the current page - $last_item: index of last item on the current page - $item_count: total number of items - $link_first: link to first page (unless this is first page) - $link_last: link to last page (unless this is last page) - $link_previous: link to previous page (unless this is first page) - $link_next: link to next page (unless this is last page) To render a range of pages the token '~3~' can be used. The number sets the radius of pages around the current page. Example for a range with radius 3: '1 .. 5 6 7 [8] 9 10 11 .. 50' Default: '~2~' url The URL that page links will point to. Make sure it contains the string $page which will be replaced by the actual page number. Must be given unless a url_maker is specified to __init__, in which case this parameter is ignored. symbol_first String to be displayed as the text for the $link_first link above. Default: '&lt;&lt;' (<<) symbol_last String to be displayed as the text for the $link_last link above. Default: '&gt;&gt;' (>>) symbol_previous String to be displayed as the text for the $link_previous link above. Default: '&lt;' (<) symbol_next String to be displayed as the text for the $link_next link above. Default: '&gt;' (>) separator: String that is used to separate page links/numbers in the above range of pages. Default: ' ' show_if_single_page: if True the navigator will be shown even if there is only one page. Default: False link_attr (optional) A dictionary of attributes that get added to A-HREF links pointing to other pages. Can be used to define a CSS style or class to customize the look of links. Example: { 'style':'border: 1px solid green' } Example: { 'class':'pager_link' } curpage_attr (optional) A dictionary of attributes that get added to the current page number in the pager (which is obviously not a link). If this dictionary is not empty then the elements will be wrapped in a SPAN tag with the given attributes. Example: { 'style':'border: 3px solid blue' } Example: { 'class':'pager_curpage' } dotdot_attr (optional) A dictionary of attributes that get added to the '..' string in the pager (which is obviously not a link). 
If this dictionary is not empty then the elements will be wrapped in a SPAN tag with the given attributes. Example: { 'style':'color: #808080' } Example: { 'class':'pager_dotdot' } """ link_attr = link_attr or {} curpage_attr = curpage_attr or {} dotdot_attr = dotdot_attr or {} self.curpage_attr = curpage_attr self.separator = separator self.link_attr = link_attr self.dotdot_attr = dotdot_attr self.url = url regex_res = re.search(r"~(\d+)~", format) if regex_res: radius = regex_res.group(1) else: radius = 2 radius = int(radius) self.radius = radius # Compute the first and last page number within the radius # e.g. '1 .. 5 6 [7] 8 9 .. 12' # -> leftmost_page = 5 # -> rightmost_page = 9 leftmost_page = ( max(self.first_page, (self.page - radius)) if self.first_page else None ) rightmost_page = ( min(self.last_page, (self.page + radius)) if self.last_page else None ) nav_items = { "first_page": None, "last_page": None, "previous_page": None, "next_page": None, "current_page": None, "radius": self.radius, "range_pages": [], } if leftmost_page is None or rightmost_page is None: return nav_items nav_items["first_page"] = { "type": "first_page", "value": unicode(symbol_first), "attrs": self.link_attr, "number": self.first_page, "href": self.url_maker(self.first_page), } # Insert dots if there are pages between the first page # and the currently displayed page range if leftmost_page - self.first_page > 1: # Wrap in a SPAN tag if dotdot_attr is set nav_items["range_pages"].append( { "type": "span", "value": "..", "attrs": self.dotdot_attr, "href": "", "number": None, } ) for thispage in range(leftmost_page, rightmost_page + 1): # Highlight the current page number and do not use a link if thispage == self.page: # Wrap in a SPAN tag if curpage_attr is set nav_items["range_pages"].append( { "type": "current_page", "value": unicode(thispage), "number": thispage, "attrs": self.curpage_attr, "href": self.url_maker(thispage), } ) nav_items["current_page"] = { "value": thispage, "attrs": self.curpage_attr, "type": "current_page", "href": self.url_maker(thispage), } # Otherwise create just a link to that page else: nav_items["range_pages"].append( { "type": "page", "value": unicode(thispage), "number": thispage, "attrs": self.link_attr, "href": self.url_maker(thispage), } ) # Insert dots if there are pages between the displayed # page numbers and the end of the page range if self.last_page - rightmost_page > 1: # Wrap in a SPAN tag if dotdot_attr is set nav_items["range_pages"].append( { "type": "span", "value": "..", "attrs": self.dotdot_attr, "href": "", "number": None, } ) # Create a link to the very last page (unless we are on the last # page or there would be no need to insert '..' spacers) nav_items["last_page"] = { "type": "last_page", "value": unicode(symbol_last), "attrs": self.link_attr, "href": self.url_maker(self.last_page), "number": self.last_page, } nav_items["previous_page"] = { "type": "previous_page", "value": unicode(symbol_previous), "attrs": self.link_attr, "number": self.previous_page or self.first_page, "href": self.url_maker(self.previous_page or self.first_page), } nav_items["next_page"] = { "type": "next_page", "value": unicode(symbol_next), "attrs": self.link_attr, "number": self.next_page or self.last_page, "href": self.url_maker(self.next_page or self.last_page), } return nav_items
python
def link_map(
    self,
    format="~2~",
    url=None,
    show_if_single_page=False,
    separator=" ",
    symbol_first="&lt;&lt;",
    symbol_last="&gt;&gt;",
    symbol_previous="&lt;",
    symbol_next="&gt;",
    link_attr=None,
    curpage_attr=None,
    dotdot_attr=None,
):
    """Return map with links to other pages if default pager() function
    is not suitable solution.

    format:
        Format string that defines how the pager would be normally
        rendered. Uses same arguments as pager() method, but returns a
        simple dictionary in form of:

        {'current_page': {'attrs': {},
                          'href': 'http://example.org/foo/page=1',
                          'value': 1},
         'first_page': {'attrs': {},
                        'href': 'http://example.org/foo/page=1',
                        'type': 'first_page',
                        'value': 1},
         'last_page': {'attrs': {},
                       'href': 'http://example.org/foo/page=8',
                       'type': 'last_page',
                       'value': 8},
         'next_page': {'attrs': {}, 'href': 'HREF', 'type': 'next_page', 'value': 2},
         'previous_page': None,
         'range_pages': [{'attrs': {},
                          'href': 'http://example.org/foo/page=1',
                          'type': 'current_page',
                          'value': 1},
                         ....
                         {'attrs': {}, 'href': '', 'type': 'span', 'value': '..'}]}

        The string can contain the following $-tokens that are substituted
        by the string.Template module:

        - $first_page: number of first reachable page
        - $last_page: number of last reachable page
        - $page: number of currently selected page
        - $page_count: number of reachable pages
        - $items_per_page: maximal number of items per page
        - $first_item: index of first item on the current page
        - $last_item: index of last item on the current page
        - $item_count: total number of items
        - $link_first: link to first page (unless this is first page)
        - $link_last: link to last page (unless this is last page)
        - $link_previous: link to previous page (unless this is first page)
        - $link_next: link to next page (unless this is last page)

        To render a range of pages the token '~3~' can be used. The
        number sets the radius of pages around the current page.
        Example for a range with radius 3: '1 .. 5 6 7 [8] 9 10 11 .. 50'

        Default: '~2~'

    url
        The URL that page links will point to. Make sure it contains the
        string $page which will be replaced by the actual page number.
        Must be given unless a url_maker is specified to __init__, in
        which case this parameter is ignored.

    symbol_first
        String to be displayed as the text for the $link_first link above.

        Default: '&lt;&lt;' (<<)

    symbol_last
        String to be displayed as the text for the $link_last link above.

        Default: '&gt;&gt;' (>>)

    symbol_previous
        String to be displayed as the text for the $link_previous link above.

        Default: '&lt;' (<)

    symbol_next
        String to be displayed as the text for the $link_next link above.

        Default: '&gt;' (>)

    separator:
        String that is used to separate page links/numbers in the above
        range of pages.

        Default: ' '

    show_if_single_page:
        if True the navigator will be shown even if there is only one page.

        Default: False

    link_attr (optional)
        A dictionary of attributes that get added to A-HREF links pointing
        to other pages. Can be used to define a CSS style or class to
        customize the look of links.

        Example: { 'style':'border: 1px solid green' }
        Example: { 'class':'pager_link' }

    curpage_attr (optional)
        A dictionary of attributes that get added to the current page
        number in the pager (which is obviously not a link). If this
        dictionary is not empty then the elements will be wrapped in a
        SPAN tag with the given attributes.

        Example: { 'style':'border: 3px solid blue' }
        Example: { 'class':'pager_curpage' }

    dotdot_attr (optional)
        A dictionary of attributes that get added to the '..' string in
        the pager (which is obviously not a link). If this dictionary is
        not empty then the elements will be wrapped in a SPAN tag with the
        given attributes.

        Example: { 'style':'color: #808080' }
        Example: { 'class':'pager_dotdot' }
    """
    link_attr = link_attr or {}
    curpage_attr = curpage_attr or {}
    dotdot_attr = dotdot_attr or {}
    self.curpage_attr = curpage_attr
    self.separator = separator
    self.link_attr = link_attr
    self.dotdot_attr = dotdot_attr
    self.url = url

    regex_res = re.search(r"~(\d+)~", format)
    if regex_res:
        radius = regex_res.group(1)
    else:
        radius = 2
    radius = int(radius)
    self.radius = radius

    # Compute the first and last page number within the radius
    # e.g. '1 .. 5 6 [7] 8 9 .. 12'
    # -> leftmost_page  = 5
    # -> rightmost_page = 9
    leftmost_page = (
        max(self.first_page, (self.page - radius)) if self.first_page else None
    )
    rightmost_page = (
        min(self.last_page, (self.page + radius)) if self.last_page else None
    )

    nav_items = {
        "first_page": None,
        "last_page": None,
        "previous_page": None,
        "next_page": None,
        "current_page": None,
        "radius": self.radius,
        "range_pages": [],
    }

    if leftmost_page is None or rightmost_page is None:
        return nav_items

    nav_items["first_page"] = {
        "type": "first_page",
        "value": unicode(symbol_first),
        "attrs": self.link_attr,
        "number": self.first_page,
        "href": self.url_maker(self.first_page),
    }

    # Insert dots if there are pages between the first page
    # and the currently displayed page range
    if leftmost_page - self.first_page > 1:
        # Wrap in a SPAN tag if dotdot_attr is set
        nav_items["range_pages"].append(
            {
                "type": "span",
                "value": "..",
                "attrs": self.dotdot_attr,
                "href": "",
                "number": None,
            }
        )

    for thispage in range(leftmost_page, rightmost_page + 1):
        # Highlight the current page number and do not use a link
        if thispage == self.page:
            # Wrap in a SPAN tag if curpage_attr is set
            nav_items["range_pages"].append(
                {
                    "type": "current_page",
                    "value": unicode(thispage),
                    "number": thispage,
                    "attrs": self.curpage_attr,
                    "href": self.url_maker(thispage),
                }
            )
            nav_items["current_page"] = {
                "value": thispage,
                "attrs": self.curpage_attr,
                "type": "current_page",
                "href": self.url_maker(thispage),
            }
        # Otherwise create just a link to that page
        else:
            nav_items["range_pages"].append(
                {
                    "type": "page",
                    "value": unicode(thispage),
                    "number": thispage,
                    "attrs": self.link_attr,
                    "href": self.url_maker(thispage),
                }
            )

    # Insert dots if there are pages between the displayed
    # page numbers and the end of the page range
    if self.last_page - rightmost_page > 1:
        # Wrap in a SPAN tag if dotdot_attr is set
        nav_items["range_pages"].append(
            {
                "type": "span",
                "value": "..",
                "attrs": self.dotdot_attr,
                "href": "",
                "number": None,
            }
        )

    # Create a link to the very last page (unless we are on the last
    # page or there would be no need to insert '..' spacers)
    nav_items["last_page"] = {
        "type": "last_page",
        "value": unicode(symbol_last),
        "attrs": self.link_attr,
        "href": self.url_maker(self.last_page),
        "number": self.last_page,
    }

    nav_items["previous_page"] = {
        "type": "previous_page",
        "value": unicode(symbol_previous),
        "attrs": self.link_attr,
        "number": self.previous_page or self.first_page,
        "href": self.url_maker(self.previous_page or self.first_page),
    }

    nav_items["next_page"] = {
        "type": "next_page",
        "value": unicode(symbol_next),
        "attrs": self.link_attr,
        "number": self.next_page or self.last_page,
        "href": self.url_maker(self.next_page or self.last_page),
    }

    return nav_items
Return map with links to other pages if default pager() function
is not suitable solution.

format:
    Format string that defines how the pager would be normally
    rendered. Uses same arguments as pager() method, but returns a
    simple dictionary in form of:

    {'current_page': {'attrs': {},
                      'href': 'http://example.org/foo/page=1',
                      'value': 1},
     'first_page': {'attrs': {},
                    'href': 'http://example.org/foo/page=1',
                    'type': 'first_page',
                    'value': 1},
     'last_page': {'attrs': {},
                   'href': 'http://example.org/foo/page=8',
                   'type': 'last_page',
                   'value': 8},
     'next_page': {'attrs': {}, 'href': 'HREF', 'type': 'next_page', 'value': 2},
     'previous_page': None,
     'range_pages': [{'attrs': {},
                      'href': 'http://example.org/foo/page=1',
                      'type': 'current_page',
                      'value': 1},
                     ....
                     {'attrs': {}, 'href': '', 'type': 'span', 'value': '..'}]}

    The string can contain the following $-tokens that are substituted
    by the string.Template module:

    - $first_page: number of first reachable page
    - $last_page: number of last reachable page
    - $page: number of currently selected page
    - $page_count: number of reachable pages
    - $items_per_page: maximal number of items per page
    - $first_item: index of first item on the current page
    - $last_item: index of last item on the current page
    - $item_count: total number of items
    - $link_first: link to first page (unless this is first page)
    - $link_last: link to last page (unless this is last page)
    - $link_previous: link to previous page (unless this is first page)
    - $link_next: link to next page (unless this is last page)

    To render a range of pages the token '~3~' can be used. The
    number sets the radius of pages around the current page.
    Example for a range with radius 3: '1 .. 5 6 7 [8] 9 10 11 .. 50'

    Default: '~2~'

url
    The URL that page links will point to. Make sure it contains the
    string $page which will be replaced by the actual page number.
    Must be given unless a url_maker is specified to __init__, in
    which case this parameter is ignored.

symbol_first
    String to be displayed as the text for the $link_first link above.

    Default: '&lt;&lt;' (<<)

symbol_last
    String to be displayed as the text for the $link_last link above.

    Default: '&gt;&gt;' (>>)

symbol_previous
    String to be displayed as the text for the $link_previous link above.

    Default: '&lt;' (<)

symbol_next
    String to be displayed as the text for the $link_next link above.

    Default: '&gt;' (>)

separator:
    String that is used to separate page links/numbers in the above
    range of pages.

    Default: ' '

show_if_single_page:
    if True the navigator will be shown even if there is only one page.

    Default: False

link_attr (optional)
    A dictionary of attributes that get added to A-HREF links pointing
    to other pages. Can be used to define a CSS style or class to
    customize the look of links.

    Example: { 'style':'border: 1px solid green' }
    Example: { 'class':'pager_link' }

curpage_attr (optional)
    A dictionary of attributes that get added to the current page
    number in the pager (which is obviously not a link). If this
    dictionary is not empty then the elements will be wrapped in a
    SPAN tag with the given attributes.

    Example: { 'style':'border: 3px solid blue' }
    Example: { 'class':'pager_curpage' }

dotdot_attr (optional)
    A dictionary of attributes that get added to the '..' string in
    the pager (which is obviously not a link). If this dictionary is
    not empty then the elements will be wrapped in a SPAN tag with the
    given attributes.

    Example: { 'style':'color: #808080' }
    Example: { 'class':'pager_dotdot' }
https://github.com/Pylons/paginate/blob/07e6f62c00a731839ca2da32e6d6a37b31a13d4f/paginate/__init__.py#L515-L771
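For orientation, a minimal usage sketch of link_map() (hedged: the collection, URL scheme, and page numbers below are illustrative, not taken from the record above):

import paginate

# 100 items, 10 per page, currently viewing page 4.
page = paginate.Page(
    range(100),
    page=4,
    items_per_page=10,
    url_maker=lambda p: "/items?page=%d" % p,  # illustrative URL scheme
)

nav = page.link_map("~2~")
nav["current_page"]["value"]              # 4
[p["value"] for p in nav["range_pages"]]  # ['2', '3', '4', '5', '6', '..']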
Pylons/paginate
paginate/__init__.py
Page._range
def _range(self, link_map, radius):
    """
    Return range of linked pages to substitute placeholder in pattern
    """
    leftmost_page = max(self.first_page, (self.page - radius))
    rightmost_page = min(self.last_page, (self.page + radius))

    nav_items = []

    # Create a link to the first page (unless we are on the first page
    # or there would be no need to insert '..' spacers)
    if self.page != self.first_page and self.first_page < leftmost_page:
        page = link_map["first_page"].copy()
        page["value"] = unicode(page["number"])
        nav_items.append(self.link_tag(page))

    for item in link_map["range_pages"]:
        nav_items.append(self.link_tag(item))

    # Create a link to the very last page (unless we are on the last
    # page or there would be no need to insert '..' spacers)
    if self.page != self.last_page and rightmost_page < self.last_page:
        page = link_map["last_page"].copy()
        page["value"] = unicode(page["number"])
        nav_items.append(self.link_tag(page))

    return self.separator.join(nav_items)
python
def _range(self, link_map, radius):
    """
    Return range of linked pages to substitute placeholder in pattern
    """
    leftmost_page = max(self.first_page, (self.page - radius))
    rightmost_page = min(self.last_page, (self.page + radius))

    nav_items = []

    # Create a link to the first page (unless we are on the first page
    # or there would be no need to insert '..' spacers)
    if self.page != self.first_page and self.first_page < leftmost_page:
        page = link_map["first_page"].copy()
        page["value"] = unicode(page["number"])
        nav_items.append(self.link_tag(page))

    for item in link_map["range_pages"]:
        nav_items.append(self.link_tag(item))

    # Create a link to the very last page (unless we are on the last
    # page or there would be no need to insert '..' spacers)
    if self.page != self.last_page and rightmost_page < self.last_page:
        page = link_map["last_page"].copy()
        page["value"] = unicode(page["number"])
        nav_items.append(self.link_tag(page))

    return self.separator.join(nav_items)
Return range of linked pages to substitute placeholder in pattern
https://github.com/Pylons/paginate/blob/07e6f62c00a731839ca2da32e6d6a37b31a13d4f/paginate/__init__.py#L773-L799
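_range() consumes the dictionary produced by link_map() and joins the rendered items with self.separator. A hedged continuation of the sketch above (note that _range is an internal helper; pager() is the public entry point):

nav = page.link_map("~2~")
html = page._range(nav, 2)
# html is a space-separated run: a link to page 1, entries for pages 2-6
# with the current page (4) rendered as plain text, a '..' spacer, and a
# link to page 10.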
Pylons/paginate
paginate/__init__.py
Page.default_link_tag
def default_link_tag(item):
    """
    Create an A-HREF tag that points to another page.
    """
    text = item["value"]
    target_url = item["href"]

    if not item["href"] or item["type"] in ("span", "current_page"):
        if item["attrs"]:
            text = make_html_tag("span", **item["attrs"]) + text + "</span>"
        return text

    return make_html_tag("a", text=text, href=target_url, **item["attrs"])
python
def default_link_tag(item):
    """
    Create an A-HREF tag that points to another page.
    """
    text = item["value"]
    target_url = item["href"]

    if not item["href"] or item["type"] in ("span", "current_page"):
        if item["attrs"]:
            text = make_html_tag("span", **item["attrs"]) + text + "</span>"
        return text

    return make_html_tag("a", text=text, href=target_url, **item["attrs"])
Create an A-HREF tag that points to another page.
https://github.com/Pylons/paginate/blob/07e6f62c00a731839ca2da32e6d6a37b31a13d4f/paginate/__init__.py#L813-L825
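default_link_tag() consumes the per-page dictionaries built by link_map() above; make_html_tag() is the helper from the same module. Two illustrative calls (output shown roughly; attribute ordering may differ):

# A linked page renders as an anchor:
default_link_tag({"type": "page", "value": "3",
                  "href": "/items?page=3", "attrs": {"class": "pager_link"}})
# -> '<a class="pager_link" href="/items?page=3">3</a>'

# The current page is never a link; with empty attrs it stays plain text:
default_link_tag({"type": "current_page", "value": "4",
                  "href": "/items?page=4", "attrs": {}})
# -> '4'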
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger.tag
def tag(self, corpus, tokenize=True):
    '''Tags a string `corpus`.'''
    # Assume untokenized corpus has \n between sentences and ' ' between words
    s_split = SentenceTokenizer().tokenize if tokenize else lambda t: t.split('\n')
    w_split = WordTokenizer().tokenize if tokenize else lambda s: s.split()

    def split_sents(corpus):
        for s in s_split(corpus):
            yield w_split(s)

    prev, prev2 = self.START
    tokens = []
    for words in split_sents(corpus):
        context = self.START + [self._normalize(w) for w in words] + self.END
        for i, word in enumerate(words):
            tag = self.tagdict.get(word)
            if not tag:
                features = self._get_features(i, word, context, prev, prev2)
                tag = self.model.predict(features)
            tokens.append((word, tag))
            prev2 = prev
            prev = tag
    return tokens
python
def tag(self, corpus, tokenize=True):
    '''Tags a string `corpus`.'''
    # Assume untokenized corpus has \n between sentences and ' ' between words
    s_split = SentenceTokenizer().tokenize if tokenize else lambda t: t.split('\n')
    w_split = WordTokenizer().tokenize if tokenize else lambda s: s.split()

    def split_sents(corpus):
        for s in s_split(corpus):
            yield w_split(s)

    prev, prev2 = self.START
    tokens = []
    for words in split_sents(corpus):
        context = self.START + [self._normalize(w) for w in words] + self.END
        for i, word in enumerate(words):
            tag = self.tagdict.get(word)
            if not tag:
                features = self._get_features(i, word, context, prev, prev2)
                tag = self.model.predict(features)
            tokens.append((word, tag))
            prev2 = prev
            prev = tag
    return tokens
Tags a string `corpus`.
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L38-L59
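A hedged usage sketch for tag() (assumes the package's bundled pickled model is available; the tags shown are illustrative):

from textblob_aptagger import PerceptronTagger

tagger = PerceptronTagger()  # loads the pickled model on construction
tagger.tag("Simple is better than complex.")
# -> [('Simple', 'NN'), ('is', 'VBZ'), ('better', 'JJR'), ...]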
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger.train
def train(self, sentences, save_loc=None, nr_iter=5):
    '''Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
    controls the number of Perceptron training iterations.

    :param sentences: A list of (words, tags) tuples.
    :param save_loc: If not ``None``, saves a pickled model in this location.
    :param nr_iter: Number of training iterations.
    '''
    self._make_tagdict(sentences)
    self.model.classes = self.classes
    for iter_ in range(nr_iter):
        c = 0
        n = 0
        for words, tags in sentences:
            prev, prev2 = self.START
            context = self.START + [self._normalize(w) for w in words] \
                + self.END
            for i, word in enumerate(words):
                guess = self.tagdict.get(word)
                if not guess:
                    feats = self._get_features(i, word, context, prev, prev2)
                    guess = self.model.predict(feats)
                    self.model.update(tags[i], guess, feats)
                prev2 = prev
                prev = guess
                c += guess == tags[i]
                n += 1
        random.shuffle(sentences)
        logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
    self.model.average_weights()
    # Pickle as a binary file
    if save_loc is not None:
        pickle.dump((self.model.weights, self.tagdict, self.classes),
                    open(save_loc, 'wb'), -1)
    return None
python
def train(self, sentences, save_loc=None, nr_iter=5):
    '''Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
    controls the number of Perceptron training iterations.

    :param sentences: A list of (words, tags) tuples.
    :param save_loc: If not ``None``, saves a pickled model in this location.
    :param nr_iter: Number of training iterations.
    '''
    self._make_tagdict(sentences)
    self.model.classes = self.classes
    for iter_ in range(nr_iter):
        c = 0
        n = 0
        for words, tags in sentences:
            prev, prev2 = self.START
            context = self.START + [self._normalize(w) for w in words] \
                + self.END
            for i, word in enumerate(words):
                guess = self.tagdict.get(word)
                if not guess:
                    feats = self._get_features(i, word, context, prev, prev2)
                    guess = self.model.predict(feats)
                    self.model.update(tags[i], guess, feats)
                prev2 = prev
                prev = guess
                c += guess == tags[i]
                n += 1
        random.shuffle(sentences)
        logging.info("Iter {0}: {1}/{2}={3}".format(iter_, c, n, _pc(c, n)))
    self.model.average_weights()
    # Pickle as a binary file
    if save_loc is not None:
        pickle.dump((self.model.weights, self.tagdict, self.classes),
                    open(save_loc, 'wb'), -1)
    return None
Train a model from sentences, and save it at ``save_loc``. ``nr_iter``
controls the number of Perceptron training iterations.

:param sentences: A list of (words, tags) tuples.
:param save_loc: If not ``None``, saves a pickled model in this location.
:param nr_iter: Number of training iterations.
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L61-L95
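A minimal training sketch with a toy corpus (hedged: real training needs a large tagged corpus, and the load=False flag is assumed here to skip loading the bundled model):

sentences = [
    (['The', 'dog', 'barks'], ['DT', 'NN', 'VBZ']),
    (['A', 'cat', 'sleeps'], ['DT', 'NN', 'VBZ']),
]

tagger = PerceptronTagger(load=False)
tagger.train(sentences, save_loc='my_model.pickle', nr_iter=5)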
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger.load
def load(self, loc):
    '''Load a pickled model.'''
    try:
        w_td_c = pickle.load(open(loc, 'rb'))
    except IOError:
        msg = ("Missing trontagger.pickle file.")
        raise MissingCorpusError(msg)
    self.model.weights, self.tagdict, self.classes = w_td_c
    self.model.classes = self.classes
    return None
python
def load(self, loc):
    '''Load a pickled model.'''
    try:
        w_td_c = pickle.load(open(loc, 'rb'))
    except IOError:
        msg = ("Missing trontagger.pickle file.")
        raise MissingCorpusError(msg)
    self.model.weights, self.tagdict, self.classes = w_td_c
    self.model.classes = self.classes
    return None
Load a pickled model.
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L97-L106
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger._normalize
def _normalize(self, word):
    '''Normalization used in pre-processing.

    - All words are lower cased
    - Digits in the range 1800-2100 are represented as !YEAR;
    - Other digits are represented as !DIGITS

    :rtype: str
    '''
    if '-' in word and word[0] != '-':
        return '!HYPHEN'
    elif word.isdigit() and len(word) == 4:
        return '!YEAR'
    elif word[0].isdigit():
        return '!DIGITS'
    else:
        return word.lower()
python
def _normalize(self, word):
    '''Normalization used in pre-processing.

    - All words are lower cased
    - Digits in the range 1800-2100 are represented as !YEAR;
    - Other digits are represented as !DIGITS

    :rtype: str
    '''
    if '-' in word and word[0] != '-':
        return '!HYPHEN'
    elif word.isdigit() and len(word) == 4:
        return '!YEAR'
    elif word[0].isdigit():
        return '!DIGITS'
    else:
        return word.lower()
Normalization used in pre-processing.

- All words are lower cased
- Digits in the range 1800-2100 are represented as !YEAR;
- Other digits are represented as !DIGITS

:rtype: str
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L108-L124
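Branch order matters here: the hyphen check runs first, and any 4-digit token becomes !YEAR, not only 1800-2100 as the docstring suggests. A few mappings, derived directly from the code above:

tagger._normalize('well-known')  # '!HYPHEN'  ('-' present, not leading)
tagger._normalize('1984')        # '!YEAR'    (4 digits)
tagger._normalize('0042')        # '!YEAR'    (also 4 digits, despite the docstring range)
tagger._normalize('3rd')         # '!DIGITS'  (leading digit)
tagger._normalize('Paris')       # 'paris'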
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger._get_features
def _get_features(self, i, word, context, prev, prev2):
    '''Map tokens into a feature representation, implemented as a
    {hashable: float} dict. If the features change, a new model must be
    trained.
    '''
    def add(name, *args):
        features[' '.join((name,) + tuple(args))] += 1

    i += len(self.START)
    features = defaultdict(int)
    # It's useful to have a constant feature, which acts sort of like a prior
    add('bias')
    add('i suffix', word[-3:])
    add('i pref1', word[0])
    add('i-1 tag', prev)
    add('i-2 tag', prev2)
    add('i tag+i-2 tag', prev, prev2)
    add('i word', context[i])
    add('i-1 tag+i word', prev, context[i])
    add('i-1 word', context[i-1])
    add('i-1 suffix', context[i-1][-3:])
    add('i-2 word', context[i-2])
    add('i+1 word', context[i+1])
    add('i+1 suffix', context[i+1][-3:])
    add('i+2 word', context[i+2])
    return features
python
def _get_features(self, i, word, context, prev, prev2):
    '''Map tokens into a feature representation, implemented as a
    {hashable: float} dict. If the features change, a new model must be
    trained.
    '''
    def add(name, *args):
        features[' '.join((name,) + tuple(args))] += 1

    i += len(self.START)
    features = defaultdict(int)
    # It's useful to have a constant feature, which acts sort of like a prior
    add('bias')
    add('i suffix', word[-3:])
    add('i pref1', word[0])
    add('i-1 tag', prev)
    add('i-2 tag', prev2)
    add('i tag+i-2 tag', prev, prev2)
    add('i word', context[i])
    add('i-1 tag+i word', prev, context[i])
    add('i-1 word', context[i-1])
    add('i-1 suffix', context[i-1][-3:])
    add('i-2 word', context[i-2])
    add('i+1 word', context[i+1])
    add('i+1 suffix', context[i+1][-3:])
    add('i+2 word', context[i+2])
    return features
Map tokens into a feature representation, implemented as a {hashable: float} dict. If the features change, a new model must be trained.
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L126-L151
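The index offset i += len(self.START) keeps context[i-2] through context[i+2] in range. A hedged sketch (assuming the module pads with START = ['-START-', '-START2-'] and END = ['-END-', '-END2-']):

feats = tagger._get_features(
    1, 'dog',
    ['-START-', '-START2-', 'the', 'dog', 'barks', '-END-', '-END2-'],
    'DT', '-START-',
)
# feats then contains counts for keys such as:
#   'bias', 'i suffix dog', 'i pref1 d', 'i-1 tag DT',
#   'i-1 word the', 'i+1 word barks', 'i+2 word -END-'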
sloria/textblob-aptagger
textblob_aptagger/taggers.py
PerceptronTagger._make_tagdict
def _make_tagdict(self, sentences):
    '''Make a tag dictionary for single-tag words.'''
    counts = defaultdict(lambda: defaultdict(int))
    for words, tags in sentences:
        for word, tag in zip(words, tags):
            counts[word][tag] += 1
            self.classes.add(tag)
    freq_thresh = 20
    ambiguity_thresh = 0.97
    for word, tag_freqs in counts.items():
        tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
        n = sum(tag_freqs.values())
        # Don't add rare words to the tag dictionary
        # Only add quite unambiguous words
        if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
            self.tagdict[word] = tag
python
def _make_tagdict(self, sentences):
    '''Make a tag dictionary for single-tag words.'''
    counts = defaultdict(lambda: defaultdict(int))
    for words, tags in sentences:
        for word, tag in zip(words, tags):
            counts[word][tag] += 1
            self.classes.add(tag)
    freq_thresh = 20
    ambiguity_thresh = 0.97
    for word, tag_freqs in counts.items():
        tag, mode = max(tag_freqs.items(), key=lambda item: item[1])
        n = sum(tag_freqs.values())
        # Don't add rare words to the tag dictionary
        # Only add quite unambiguous words
        if n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh:
            self.tagdict[word] = tag
Make a tag dictionary for single-tag words.
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/taggers.py#L153-L168
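The two thresholds interact: a word must be both frequent and nearly unambiguous before dictionary lookup bypasses the model. A small runnable restatement of the gate:

freq_thresh, ambiguity_thresh = 20, 0.97

def enters_tagdict(n, mode):
    # n: total occurrences of a word; mode: count of its most frequent tag
    return n >= freq_thresh and (float(mode) / n) >= ambiguity_thresh

enters_tagdict(25, 24)  # False: 24/25 = 0.96 ambiguity, just under 0.97
enters_tagdict(25, 25)  # True
enters_tagdict(10, 10)  # False: below freq_thresh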
sloria/textblob-aptagger
textblob_aptagger/_perceptron.py
train
def train(nr_iter, examples):
    '''Return an averaged perceptron model trained on ``examples`` for
    ``nr_iter`` iterations.
    '''
    model = AveragedPerceptron()
    for i in range(nr_iter):
        random.shuffle(examples)
        for features, class_ in examples:
            scores = model.predict(features)
            guess, score = max(scores.items(), key=lambda i: i[1])
            if guess != class_:
                model.update(class_, guess, features)
    model.average_weights()
    return model
python
def train(nr_iter, examples):
    '''Return an averaged perceptron model trained on ``examples`` for
    ``nr_iter`` iterations.
    '''
    model = AveragedPerceptron()
    for i in range(nr_iter):
        random.shuffle(examples)
        for features, class_ in examples:
            scores = model.predict(features)
            guess, score = max(scores.items(), key=lambda i: i[1])
            if guess != class_:
                model.update(class_, guess, features)
    model.average_weights()
    return model
Return an averaged perceptron model trained on ``examples`` for ``nr_iter`` iterations.
https://github.com/sloria/textblob-aptagger/blob/fb98bbd16a83650cab4819c4b89f0973e60fb3fe/textblob_aptagger/_perceptron.py#L85-L98
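Note that this module-level helper expects model.predict() to return a {class: score} mapping (it calls .items() on the result), unlike the tagger above, which treats predict() as returning a single label. A hedged toy usage:

# Toy (features, class) pairs; the feature dicts mirror _get_features output.
examples = [
    ({'bias': 1, 'i suffix ing': 1}, 'VBG'),
    ({'bias': 1, 'i suffix dog': 1}, 'NN'),
]
model = train(5, examples)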
statueofmike/rtsp
scripts/rtp.py
RtpPacket.encode
def encode(self, V, P, X, CC, seqNum, M, PT, SSRC, payload):
    """Encode the RTP packet with header fields and payload."""
    timestamp = int(time())
    header = bytearray(HEADER_SIZE)
    # Fill the header bytearray with RTP header fields
    # ...
    header[0] = header[0] | V << 6;
    header[0] = header[0] | P << 5;
    header[0] = header[0] | X << 4;
    header[0] = header[0] | CC;
    header[1] = header[1] | M << 7;
    header[1] = header[1] | PT;
    header[2] = (seqNum >> 8) & 0xFF;
    header[3] = seqNum & 0xFF;
    header[4] = (timestamp >> 24) & 0xFF;
    header[5] = (timestamp >> 16) & 0xFF;
    header[6] = (timestamp >> 8) & 0xFF;
    header[7] = timestamp & 0xFF;
    header[8] = (SSRC >> 24) & 0xFF;
    header[9] = (SSRC >> 16) & 0xFF;
    header[10] = (SSRC >> 8) & 0xFF;
    header[11] = SSRC & 0xFF

    self.header = header

    # Get the payload
    # ...
    self.payload = payload
python
def encode(self, V, P, X, CC, seqNum, M, PT, SSRC, payload):
    """Encode the RTP packet with header fields and payload."""
    timestamp = int(time())
    header = bytearray(HEADER_SIZE)
    # Fill the header bytearray with RTP header fields
    # ...
    header[0] = header[0] | V << 6;
    header[0] = header[0] | P << 5;
    header[0] = header[0] | X << 4;
    header[0] = header[0] | CC;
    header[1] = header[1] | M << 7;
    header[1] = header[1] | PT;
    header[2] = (seqNum >> 8) & 0xFF;
    header[3] = seqNum & 0xFF;
    header[4] = (timestamp >> 24) & 0xFF;
    header[5] = (timestamp >> 16) & 0xFF;
    header[6] = (timestamp >> 8) & 0xFF;
    header[7] = timestamp & 0xFF;
    header[8] = (SSRC >> 24) & 0xFF;
    header[9] = (SSRC >> 16) & 0xFF;
    header[10] = (SSRC >> 8) & 0xFF;
    header[11] = SSRC & 0xFF

    self.header = header

    # Get the payload
    # ...
    self.payload = payload
Encode the RTP packet with header fields and payload.
https://github.com/statueofmike/rtsp/blob/4816de2da3cc9966122c8511943e6db713052a17/scripts/rtp.py#L11-L38
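A hedged encoding sketch (assumptions: RtpPacket() takes no constructor arguments and HEADER_SIZE is 12 in the same script; payload type 26 is the JPEG RTP type and is purely illustrative):

packet = RtpPacket()
packet.encode(
    V=2, P=0, X=0, CC=0,   # version 2, no padding, no extension, no CSRCs
    seqNum=7, M=0, PT=26,  # sequence number, marker bit, payload type
    SSRC=12345,
    payload=b'...',        # illustrative payload bytes
)
assert packet.header[0] == 2 << 6  # version bits occupy the two MSBs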