desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
def submit(self, **kw):
    """Submit the currently selected form via do_request."""
    form = self.form
    if form is None:
        raise BrowserError('No form selected.')
    return self.do_request(form.click(**kw))
|
def add_processor(self, processor):
    """Add a processor to the application.

    >>> urls = ("/(.*)", "echo")
    >>> app = application(urls, globals())
    >>> class echo:
    ...     def GET(self, name): return name
    >>> def hello(handler): return "hello, " + handler()
    >>> app.add_processor(hello)
    >>> app.request("/web.py").data
    b'hello, web.py'
    """
    self.processors.append(processor)
|
def request(self, localpart='/', method='GET', data=None, host='0.0.0.0:8080', headers=None, https=False, **kw):
    """Make a request to this application for the specified path and method.

    The response is a storage object with ``data``, ``status`` and
    ``headers``.  ``headers`` is a mapping of HTTP headers to send; pass
    ``https=True`` to simulate an HTTPS request.

    >>> urls = ("/hello", "hello")
    >>> app = application(urls, globals())
    >>> class hello:
    ...     def GET(self):
    ...         web.header('Content-Type', 'text/plain')
    ...         return "hello"
    >>> response = app.request("/hello")
    >>> response.data
    b'hello'
    >>> response.status
    '200 OK'
    >>> response.headers['Content-Type']
    'text/plain'
    """
    path, maybe_query = splitquery(localpart)
    query = maybe_query or ''
    if 'env' in kw:
        env = kw['env']
    else:
        env = {}
    env = dict(env, HTTP_HOST=host, REQUEST_METHOD=method, PATH_INFO=path,
               QUERY_STRING=query, HTTPS=str(https))
    headers = headers or {}
    for k, v in headers.items():
        env['HTTP_' + k.upper().replace('-', '_')] = v
    # WSGI spells Content-Length/Content-Type without the HTTP_ prefix.
    if 'HTTP_CONTENT_LENGTH' in env:
        env['CONTENT_LENGTH'] = env.pop('HTTP_CONTENT_LENGTH')
    if 'HTTP_CONTENT_TYPE' in env:
        env['CONTENT_TYPE'] = env.pop('HTTP_CONTENT_TYPE')
    if method not in ['HEAD', 'GET']:
        data = data or ''
        if isinstance(data, dict):
            q = urlencode(data)
        else:
            q = data
        env['wsgi.input'] = BytesIO(q.encode('utf-8'))
        if 'CONTENT_LENGTH' not in env:
            env['CONTENT_LENGTH'] = len(q)
    response = web.storage()

    def start_response(status, headers):
        response.status = status
        response.headers = dict(headers)
        response.header_items = headers

    data = self.wsgifunc()(env, start_response)
    # BUG FIX: the WSGI function yields bytes chunks; joining them with a
    # str separator ('') raised TypeError.  Join with b''.
    response.data = b''.join(data)
    return response
|
def wsgifunc(self, *middleware):
    """Return a WSGI-compatible function for this application.

    `middleware` is a sequence of WSGI middleware wrappers applied, in
    order, to the resulting function.
    """
    def peep(iterator):
        """Peep into an iterator by doing one iteration and return an
        equivalent iterator."""
        try:
            firstchunk = next(iterator)
        except StopIteration:
            firstchunk = ''
        return itertools.chain([firstchunk], iterator)

    def wsgi(env, start_resp):
        # clear threadlocal to avoid inheriting state from a previous request
        self._cleanup()
        self.load(env)
        try:
            # allow uppercase methods only
            if web.ctx.method.upper() != web.ctx.method:
                raise web.nomethod()
            result = self.handle_with_processors()
            if is_iter(result):
                result = peep(result)
            else:
                result = [result]
        except web.HTTPError as e:
            result = [e.data]

        def build_result(result):
            # normalize every chunk to bytes for the WSGI server
            for r in result:
                if PY2:
                    yield utils.safestr(r)
                elif isinstance(r, bytes):
                    yield r
                elif isinstance(r, string_types):
                    yield r.encode('utf-8')
                else:
                    yield str(r).encode('utf-8')

        result = build_result(result)
        status, headers = web.ctx.status, web.ctx.headers
        start_resp(status, headers)

        def cleanup():
            self._cleanup()
            # BUG FIX: this chunk is chained *after* build_result, so it
            # bypasses the bytes normalization above; yielding str ''
            # leaked a str into an otherwise bytes-only response body.
            yield b''

        return itertools.chain(result, cleanup())

    for m in middleware:
        wsgi = m(wsgi)
    return wsgi
|
def run(self, *middleware):
    """Start handling requests.

    In a CGI or FastCGI context the corresponding protocol is followed.
    From the command line an HTTP server is started on the port named in
    the first command line argument, or 8080 if there is no argument.
    `middleware` is a list of WSGI middleware applied to the resulting
    WSGI function.
    """
    app = self.wsgifunc(*middleware)
    return wsgi.runwsgi(app)
|
def stop(self):
    """Stop the http server started by run."""
    server = httpserver.server
    if server:
        server.stop()
        httpserver.server = None
|
def cgirun(self, *middleware):
    """Return a CGI handler. This is mostly useful with Google App Engine,
    where you can just do: main = app.cgirun()
    """
    app = self.wsgifunc(*middleware)
    try:
        from google.appengine.ext.webapp.util import run_wsgi_app
        return run_wsgi_app(app)
    except ImportError:
        # not on App Engine; fall back to the stdlib CGI handler
        return wsgiref.handlers.CGIHandler().run(app)
|
def gaerun(self, *middleware):
    """Start the program in a way that works with Google App Engine
    (Python 2.5 / 2.7).

    On 2.5, start it with app.gaerun().  On 2.7, point the app.yaml
    handler at the global variable that holds app.gaerun()'s result.
    """
    app = self.wsgifunc(*middleware)
    try:
        major, minor = sys.version_info[:2]
        if major != 2:
            raise EnvironmentError('Google App Engine only supports python 2.5 and 2.7')
        if minor == 7:
            # 2.7 runtime: hand back the WSGI app itself
            return app
        if minor == 5:
            from google.appengine.ext.webapp.util import run_wsgi_app
            return run_wsgi_app(app)
        raise EnvironmentError('Not a supported platform, use python 2.5 or 2.7')
    except ImportError:
        return wsgiref.handlers.CGIHandler().run(app)
|
def load(self, env):
    """Initialize web.ctx from the WSGI environ `env`."""
    ctx = web.ctx
    ctx.clear()
    ctx.status = '200 OK'
    ctx.headers = []
    ctx.output = ''
    ctx.environ = ctx.env = env
    ctx.host = env.get('HTTP_HOST')

    # Protocol: trust wsgi.url_scheme first, then the HTTPS flag.
    scheme = env.get('wsgi.url_scheme')
    if scheme in ['http', 'https']:
        ctx.protocol = scheme
    elif env.get('HTTPS', '').lower() in ['on', 'true', '1']:
        ctx.protocol = 'https'
    else:
        ctx.protocol = 'http'

    ctx.homedomain = ctx.protocol + '://' + env.get('HTTP_HOST', '[unknown]')
    ctx.homepath = os.environ.get('REAL_SCRIPT_NAME', env.get('SCRIPT_NAME', ''))
    ctx.home = ctx.homedomain + ctx.homepath
    ctx.realhome = ctx.home
    ctx.ip = env.get('REMOTE_ADDR')
    ctx.method = env.get('REQUEST_METHOD')
    ctx.path = env.get('PATH_INFO')
    # lighttpd: recover the path from REQUEST_URI instead of PATH_INFO.
    if env.get('SERVER_SOFTWARE', '').startswith('lighttpd/'):
        ctx.path = lstrips(env.get('REQUEST_URI').split('?')[0], ctx.homepath)
        ctx.path = unquote(ctx.path)

    if env.get('QUERY_STRING'):
        ctx.query = '?' + env.get('QUERY_STRING', '')
    else:
        ctx.query = ''
    ctx.fullpath = ctx.path + ctx.query

    # decode any bytes values so ctx holds text everywhere
    for k, v in iteritems(ctx):
        if isinstance(v, bytes):
            ctx[k] = v.decode('utf-8', 'replace')

    ctx.status = '200 OK'
    ctx.app_stack = []
|
def _delegate_sub_application(self, dir, app):
    """Delegate the request to sub application `app` rooted at `dir`.

    home, homepath, path and fullpath in web.ctx are rewritten to mimic a
    request to the subapp; the previous context is stashed in
    web.ctx._oldctx so it can be restored after handling.
    @@Any issues with when used with yield?
    """
    ctx = web.ctx
    ctx._oldctx = web.storage(ctx)
    ctx.home += dir
    ctx.homepath += dir
    ctx.path = ctx.path[len(dir):]
    ctx.fullpath = ctx.fullpath[len(dir):]
    return app.handle_with_processors()
|
def notfound(self):
    """Return HTTPError with '404 not found' message."""
    parent = self.get_parent_app()
    if parent:
        # defer to the enclosing application's handler
        return parent.notfound()
    return web._NotFound()
|
def internalerror(self):
    """Return HTTPError with '500 internal error' message."""
    parent = self.get_parent_app()
    if parent:
        # defer to the enclosing application's handler
        return parent.internalerror()
    if web.config.get('debug'):
        return debugerror()
    return web._InternalError()
|
def _processor(self, handler):
    """Application processor that sets up the session for every request."""
    self._cleanup()
    self._load()
    try:
        return handler()
    finally:
        # persist the session even when the handler raises
        self._save()
|
def _load(self):
    """Load the session from the store, keyed by the id from the cookie.

    NOTE: the original read cookie_domain, cookie_path and httponly from
    the config into locals it never used; those dead reads are removed.
    """
    cookie_name = self._config.cookie_name
    self.session_id = web.cookies().get(cookie_name)

    # protect against session_id tampering
    if self.session_id and not self._valid_session_id(self.session_id):
        self.session_id = None

    self._check_expiry()
    if self.session_id:
        d = self.store[self.session_id]
        self.update(d)
        self._validate_ip()

    if not self.session_id:
        # new session: generate an id and apply the initializer
        self.session_id = self._generate_session_id()
        if self._initializer:
            if isinstance(self._initializer, dict):
                self.update(deepcopy(self._initializer))
            elif hasattr(self._initializer, '__call__'):
                self._initializer()

    self.ip = web.ctx.ip
|
def _generate_session_id(self):
    """Generate a random id for the session, retrying on collision."""
    while True:
        rand = os.urandom(16)
        now = time.time()
        secret_key = self._config.secret_key
        hashable = '%s%s%s%s' % (rand, now, utils.safestr(web.ctx.ip), secret_key)
        digest = sha1(hashable if PY2 else hashable.encode('utf-8'))
        session_id = digest.hexdigest()
        if session_id not in self.store:
            return session_id
|
def _cleanup(self):
    """Clean up stored sessions, at most once per timeout interval."""
    now = time.time()
    timeout = self._config.timeout
    if now - self._last_cleanup_time > timeout:
        self.store.cleanup(timeout)
        self._last_cleanup_time = now
|
def expired(self):
    """Handle access to an expired session: kill it, save, and raise."""
    self._killed = True
    self._save()
    raise SessionExpired(self._config.expired_message)
|
def kill(self):
    """Kill the session, making it no longer available."""
    # drop the stored entry, then mark this object dead
    del self.store[self.session_id]
    self._killed = True
|
def cleanup(self, timeout):
    """Remove all expired sessions; concrete stores must implement this."""
    raise NotImplementedError()
|
def encode(self, session_dict):
    """Encode the session dict as a base64 bytes string.

    Uses base64.encodebytes: encodestring was a deprecated alias and was
    removed in Python 3.9.
    """
    pickled = pickle.dumps(session_dict)
    return base64.encodebytes(pickled)
|
def decode(self, session_data):
    """Decode base64-encoded data back into the session dict.

    Uses base64.decodebytes: decodestring was a deprecated alias and was
    removed in Python 3.9.
    """
    pickled = base64.decodebytes(session_data)
    return pickle.loads(pickled)
|
def parse_request(self):
    """Parse the next HTTP request start-line and message-headers."""
    self.rfile = SizeCheckWrapper(self.conn.rfile,
                                  self.server.max_request_header_size)
    try:
        if not self.read_request_line():
            return
    except MaxSizeExceeded:
        self.simple_response('414 Request-URI Too Long',
                             'The Request-URI sent with the request exceeds the maximum allowed bytes.')
        return

    try:
        if not self.read_request_headers():
            return
    except MaxSizeExceeded:
        self.simple_response('413 Request Entity Too Large',
                             'The headers sent with the request exceed the maximum allowed bytes.')
        return

    self.ready = True
|
def read_request_headers(self):
    """Read self.rfile into self.inheaders. Return success."""
    try:
        read_headers(self.rfile, self.inheaders)
    except ValueError:
        ex = sys.exc_info()[1]
        self.simple_response('400 Bad Request', ex.args[0])
        return False

    mrbs = self.server.max_request_body_size
    if mrbs and int(self.inheaders.get('Content-Length', 0)) > mrbs:
        self.simple_response('413 Request Entity Too Large',
                             'The entity sent with the request exceeds the maximum allowed bytes.')
        return False

    # BUG FIX: persistence decision.  The original folded both protocol
    # branches under HTTP/1.1, which closed *every* HTTP/1.1 connection
    # that lacked "Connection: Keep-Alive" — defeating 1.1's default
    # keep-alive.  HTTP/1.1 closes only on an explicit "close"; older
    # protocols close unless the client asked for Keep-Alive.
    if self.response_protocol == 'HTTP/1.1':
        if self.inheaders.get('Connection', '') == 'close':
            self.close_connection = True
    else:
        if self.inheaders.get('Connection', '') != 'Keep-Alive':
            self.close_connection = True

    # Transfer-Encoding support (HTTP/1.1 only)
    te = None
    if self.response_protocol == 'HTTP/1.1':
        te = self.inheaders.get('Transfer-Encoding')
        if te:
            te = [x.strip().lower() for x in te.split(',') if x.strip()]
    self.chunked_read = False
    if te:
        for enc in te:
            if enc == 'chunked':
                self.chunked_read = True
            else:
                # Unsupported transfer-coding
                self.simple_response('501 Unimplemented')
                self.close_connection = True
                return False

    if self.inheaders.get('Expect', '') == '100-continue':
        # BUG FIX: the continue line must be all bytes; the original
        # concatenated the encoded protocol with a str, raising TypeError.
        msg = self.server.protocol.encode('ascii') + b' 100 Continue\r\n\r\n'
        try:
            self.conn.wfile.write(msg)
        except socket.error:
            x = sys.exc_info()[1]
            if x.args[0] not in socket_errors_to_ignore:
                raise
    return True
|
def parse_request_uri(self, uri):
    """Parse a Request-URI into (scheme, authority, path).

    A Request-URI must be one of: "*" | absoluteURI | abs_path |
    authority.  A URI starting with a double forward-slash is therefore
    interpreted as an abs_path with an empty first path segment, never
    as a net_path.
    """
    if uri == ASTERISK:
        return None, None, uri

    scheme, authority, path, params, query, fragment = urlparse(uri)
    if scheme and QUESTION_MARK not in scheme:
        # absoluteURI
        return scheme, authority, path

    if uri.startswith(FORWARD_SLASH):
        # abs_path
        return None, None, uri
    # authority
    return None, uri, None
|
def unquote_bytes(self, path):
    """Take a %-quoted bytes path and unquote the %XX escapes.

    BUG FIX: operate on bytes throughout.  The original split a bytes
    path on the str '%' and concatenated bytes with str, raising
    TypeError on any input containing a quoted character.
    """
    res = path.split(b'%')
    for i in range(1, len(res)):
        item = res[i]
        try:
            # first two chars after '%' are the hex code of one byte
            res[i] = bytes([int(item[:2], 16)]) + item[2:]
        except ValueError:
            raise
    return b''.join(res)
|
def respond(self):
    """Call the gateway and write its iterable output."""
    mrbs = self.server.max_request_body_size
    if self.chunked_read:
        self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
    else:
        cl = int(self.inheaders.get('Content-Length', 0))
        if mrbs and mrbs < cl:
            if not self.sent_headers:
                self.simple_response('413 Request Entity Too Large',
                                     'The entity sent with the request exceeds the maximum allowed bytes.')
            return
        self.rfile = KnownLengthRFile(self.conn.rfile, cl)

    self.server.gateway(self).respond()

    if self.ready and not self.sent_headers:
        self.sent_headers = True
        self.send_headers()
    if self.chunked_write:
        # BUG FIX: terminating chunk must be bytes on the binary wfile;
        # the original wrote a str, raising TypeError.
        self.conn.wfile.write(b'0\r\n\r\n')
|
def simple_response(self, status, msg=''):
    """Write a simple response back to the client.

    BUG FIX: every element of buf must be bytes (wfile is binary) and the
    join separator must be bytes.  The original mixed str entries
    ('Content-Type...', 'Connection: close...') into a bytes list and
    joined with str '', raising TypeError.
    """
    status = str(status)
    buf = [bytes(self.server.protocol, 'ascii') + SPACE +
           bytes(status, 'ISO-8859-1') + CRLF,
           bytes('Content-Length: %s\r\n' % len(msg), 'ISO-8859-1'),
           b'Content-Type: text/plain\r\n']

    if status[:3] in ('413', '414'):
        # request entity/URI too large: never keep the connection open
        self.close_connection = True
        if self.response_protocol == 'HTTP/1.1':
            buf.append(b'Connection: close\r\n')
        else:
            # HTTP/1.0 has no 413/414 status nor Connection header
            status = '400 Bad Request'

    buf.append(CRLF)
    if msg:
        if isinstance(msg, unicodestr):
            msg = msg.encode('ISO-8859-1')
        buf.append(msg)

    try:
        self.conn.wfile.write(EMPTY.join(buf))
    except socket.error:
        x = sys.exc_info()[1]
        if x.args[0] not in socket_errors_to_ignore:
            raise
|
def write(self, chunk):
    """Write unbuffered data to the client, chunk-encoding if enabled."""
    wfile = self.conn.wfile
    if self.chunked_write and chunk:
        # hex length line, payload, trailing CRLF
        pieces = [bytes(hex(len(chunk)), 'ASCII')[2:], CRLF, chunk, CRLF]
        wfile.write(EMPTY.join(pieces))
    else:
        wfile.write(chunk)
|
def send_headers(self):
    """Assert, process, and send the HTTP response message-headers.

    self.status and self.outheaders must be set before calling this.
    """
    hkeys = [key.lower() for key, value in self.outheaders]
    status = int(self.status[:3])

    if status == 413:
        # the request body was too large; close so we don't read it
        self.close_connection = True
    elif 'content-length' not in hkeys:
        if status < 200 or status in (204, 205, 304):
            # these statuses carry no message-body; nothing to declare
            pass
        elif self.response_protocol == 'HTTP/1.1' and self.method != 'HEAD':
            # use chunked transfer-coding when the length is unknown
            self.chunked_write = True
            self.outheaders.append(('Transfer-Encoding', 'chunked'))
        else:
            # closing the connection is the only way to signal the end
            self.close_connection = True

    if 'connection' not in hkeys:
        if self.response_protocol == 'HTTP/1.1':
            if self.close_connection:
                self.outheaders.append(('Connection', 'close'))
        elif not self.close_connection:
            self.outheaders.append(('Connection', 'Keep-Alive'))

    if not self.close_connection and not self.chunked_read:
        # drain any unread request body so the next request parses cleanly
        remaining = getattr(self.rfile, 'remaining', 0)
        if remaining > 0:
            self.rfile.read(remaining)

    if 'date' not in hkeys:
        self.outheaders.append(
            ('Date', email.utils.formatdate(usegmt=True).encode('ISO-8859-1')))
    if 'server' not in hkeys:
        self.outheaders.append(
            ('Server', self.server.server_name.encode('ISO-8859-1')))

    buf = [self.server.protocol.encode('ascii') + SPACE + self.status + CRLF]
    for k, v in self.outheaders:
        buf.append(k + COLON + SPACE + v + CRLF)
    buf.append(CRLF)
    self.conn.wfile.write(EMPTY.join(buf))
|
def communicate(self):
    """Read each request on this connection and respond appropriately."""
    request_seen = False
    try:
        while True:
            # reset req each iteration so errors from the constructor
            # are not attributed to the previous request
            req = None
            req = self.RequestHandlerClass(self.server, self)
            req.parse_request()
            if self.server.stats['Enabled']:
                self.requests_seen += 1
            if not req.ready:
                # parsing failed (a simple_response was probably sent);
                # let the connection close
                return
            request_seen = True
            req.respond()
            if req.close_connection:
                return
    except socket.error:
        e = sys.exc_info()[1]
        errnum = e.args[0]
        # SSL sockets report a different (longer) timed-out string
        if errnum == 'timed out' or errnum == 'The read operation timed out':
            # only report a timeout if no request was started at all,
            # or we're in the middle of one — not between requests
            if (not request_seen) or (req and req.started_request):
                if req and not req.sent_headers:
                    try:
                        req.simple_response('408 Request Timeout')
                    except FatalSSLAlert:
                        return
        elif errnum not in socket_errors_to_ignore:
            self.server.error_log('socket.error %s' % repr(errnum),
                                  level=logging.WARNING, traceback=True)
            if req and not req.sent_headers:
                try:
                    req.simple_response('500 Internal Server Error')
                except FatalSSLAlert:
                    return
        return
    except (KeyboardInterrupt, SystemExit):
        raise
    except FatalSSLAlert:
        return
    except NoSSLError:
        if req and not req.sent_headers:
            # unwrap our wfile and answer in plain HTTP
            self.wfile = CP_makefile(self.socket._sock, 'wb', self.wbufsize)
            req.simple_response('400 Bad Request',
                                'The client sent a plain HTTP request, but this server only speaks HTTPS on this port.')
            self.linger = True
    except Exception:
        e = sys.exc_info()[1]
        self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
        if req and not req.sent_headers:
            try:
                req.simple_response('500 Internal Server Error')
            except FatalSSLAlert:
                return
|
def close(self):
    """Close the socket underlying this connection."""
    self.rfile.close()
    # when lingering, leave the socket open so pending output can drain
    if not self.linger:
        self.socket.close()
|
def start(self):
    """Start the pool of threads."""
    for _ in range(self.min):
        self._threads.append(WorkerThread(self.server))
    for worker in self._threads:
        worker.setName('CP Server ' + worker.getName())
        worker.start()
    # block until every worker reports ready
    for worker in self._threads:
        while not worker.ready:
            time.sleep(0.1)
|
def _get_idle(self):
    """Number of worker threads which are idle. Read-only."""
    # a worker with no assigned connection is idle
    return sum(1 for t in self._threads if t.conn is None)
|
def grow(self, amount):
    """Spawn new worker threads (not above self.max)."""
    if self.max > 0:
        budget = max(self.max - len(self._threads), 0)
    else:
        # non-positive max means no limit
        budget = float('inf')
    n_new = min(amount, budget)

    workers = [self._spawn_worker() for _ in range(n_new)]
    # wait for all new workers to come up before exposing them
    while not all(worker.ready for worker in workers):
        time.sleep(0.1)
    self._threads.extend(workers)
|
def shrink(self, amount):
    """Kill off worker threads (not below self.min)."""
    # Reap dead threads first.  BUG FIX: iterate over a copy — removing
    # from the list being iterated skips the element that follows each
    # removal, leaving dead threads in the pool.
    for t in list(self._threads):
        if not t.isAlive():
            self._threads.remove(t)
            amount -= 1
    # Ask at most `amount` live workers to shut down, never dropping
    # below self.min.
    n_extra = max(len(self._threads) - self.min, 0)
    n_to_remove = min(amount, n_extra)
    for _ in range(n_to_remove):
        self._queue.put(_SHUTDOWNREQUEST)
|
def start(self):
    """Run the server forever."""
    self._interrupt = None

    if self.software is None:
        self.software = '%s Server' % self.version

    # Select addresses to try to bind.
    if isinstance(self.bind_addr, basestring):
        # AF_UNIX socket: remove any stale socket file, then try to
        # make the new one broadly accessible.
        try:
            os.unlink(self.bind_addr)
        except:
            pass
        try:
            os.chmod(self.bind_addr, 511)  # 0o777
        except:
            pass
        info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, '', self.bind_addr)]
    else:
        # AF_INET or AF_INET6 socket
        host, port = self.bind_addr
        try:
            info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                      socket.SOCK_STREAM, 0,
                                      socket.AI_PASSIVE)
        except socket.gaierror:
            # guess the family from the host string
            if ':' in self.bind_addr[0]:
                info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, '',
                         self.bind_addr + (0, 0))]
            else:
                info = [(socket.AF_INET, socket.SOCK_STREAM, 0, '',
                         self.bind_addr)]

    self.socket = None
    msg = 'No socket could be created'
    for af, socktype, proto, canonname, sa in info:
        try:
            self.bind(af, socktype, proto)
        except socket.error as serr:
            # accumulate failures; try the next candidate address
            msg = '%s -- (%s: %s)' % (msg, sa, serr)
            if self.socket:
                self.socket.close()
            self.socket = None
            continue
        break
    if not self.socket:
        raise socket.error(msg)

    # timeout so that tick() returns periodically (see tick())
    self.socket.settimeout(1)
    self.socket.listen(self.request_queue_size)

    # start the worker threads
    self.requests.start()

    self.ready = True
    self._start_time = time.time()
    while self.ready:
        try:
            self.tick()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.error_log('Error in HTTPServer.tick', level=logging.ERROR,
                           traceback=True)
        if self.interrupt:
            # wait for stop() to finish (interrupt is True while stopping)
            while self.interrupt is True:
                time.sleep(0.1)
            if self.interrupt:
                raise self.interrupt
|
def bind(self, family, type, proto=0):
    """Create (or recreate) the actual socket object."""
    self.socket = socket.socket(family, type, proto)
    prevent_socket_inheritance(self.socket)
    self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    if self.nodelay and not isinstance(self.bind_addr, str):
        self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
    if self.ssl_adapter is not None:
        self.socket = self.ssl_adapter.bind(self.socket)

    # When listening on the IPv6 any-address, disable V6ONLY so the
    # socket also accepts IPv4 connections (dual-stack).
    ipv6_any = (hasattr(socket, 'AF_INET6')
                and family == socket.AF_INET6
                and self.bind_addr[0] in ('::', '::0', '::0.0.0.0'))
    if ipv6_any:
        try:
            self.socket.setsockopt(socket.IPPROTO_IPV6,
                                   socket.IPV6_V6ONLY, 0)
        except (AttributeError, socket.error):
            # option not available on this TCP stack
            pass
    self.socket.bind(self.bind_addr)
|
def tick(self):
    """Accept a new connection and put it on the Queue."""
    try:
        s, addr = self.socket.accept()
        if self.stats['Enabled']:
            self.stats['Accepts'] += 1
        if not self.ready:
            return

        prevent_socket_inheritance(s)
        if hasattr(s, 'settimeout'):
            s.settimeout(self.timeout)

        makefile = CP_makefile
        ssl_env = {}
        # with an SSL adapter configured, wrap the raw socket
        if self.ssl_adapter is not None:
            try:
                s, ssl_env = self.ssl_adapter.wrap(s)
            except NoSSLError:
                # plain HTTP on the HTTPS port: answer 400 in the clear
                msg = ('The client sent a plain HTTP request, but this '
                       'server only speaks HTTPS on this port.')
                buf = ['%s 400 Bad Request\r\n' % self.protocol,
                       'Content-Length: %s\r\n' % len(msg),
                       'Content-Type: text/plain\r\n\r\n',
                       msg]
                wfile = makefile(s, 'wb', DEFAULT_BUFFER_SIZE)
                try:
                    wfile.write(''.join(buf).encode('ISO-8859-1'))
                except socket.error:
                    x = sys.exc_info()[1]
                    if x.args[0] not in socket_errors_to_ignore:
                        raise
                return
            if not s:
                return
            makefile = self.ssl_adapter.makefile
            # re-apply our timeout since s may be a new socket object
            if hasattr(s, 'settimeout'):
                s.settimeout(self.timeout)

        conn = self.ConnectionClass(self, s, makefile)

        if not isinstance(self.bind_addr, basestring):
            # addr can be None in some cases; synthesize the any-address
            # matching the socket family
            if addr is None:
                if len(s.getsockname()) == 2:
                    addr = ('0.0.0.0', 0)  # AF_INET
                else:
                    addr = ('::', 0)  # AF_INET6
            conn.remote_addr = addr[0]
            conn.remote_port = addr[1]

        conn.ssl_env = ssl_env

        try:
            self.requests.put(conn)
        except queue.Full:
            # worker queue is full; drop the connection
            conn.close()
            return
    except socket.timeout:
        # accept() times out periodically so the start() loop can notice
        # interrupts
        return
    except socket.error:
        x = sys.exc_info()[1]
        if self.stats['Enabled']:
            self.stats['Socket Errors'] += 1
        if x.args[0] in socket_error_eintr:
            # interrupted by a signal; retry on the next tick
            return
        if x.args[0] in socket_errors_nonblocking:
            # nothing ready; try again
            return
        if x.args[0] in socket_errors_to_ignore:
            # our socket was closed
            return
        raise
|
def stop(self):
    """Gracefully shutdown a server that is serving forever."""
    self.ready = False
    if self._start_time is not None:
        self._run_time += time.time() - self._start_time
    self._start_time = None

    sock = getattr(self, 'socket', None)
    if sock:
        if not isinstance(self.bind_addr, basestring):
            # Connect to our own listening socket so a blocked accept()
            # returns immediately.
            try:
                host, port = sock.getsockname()[:2]
            except socket.error:
                x = sys.exc_info()[1]
                if x.args[0] not in socket_errors_to_ignore:
                    raise
            else:
                for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
                                              socket.SOCK_STREAM):
                    af, socktype, proto, canonname, sa = res
                    s = None
                    try:
                        s = socket.socket(af, socktype, proto)
                        s.settimeout(1.0)
                        s.connect((host, port))
                        s.close()
                    except socket.error:
                        if s:
                            s.close()
        if hasattr(sock, 'close'):
            sock.close()
        self.socket = None

    self.requests.stop(self.shutdown_timeout)
|
def respond(self):
    """Process the current request. Must be overridden in a subclass."""
    # BUG FIX: `raise NotImplemented` raises TypeError at runtime —
    # NotImplemented is a sentinel value, not an exception class.
    raise NotImplementedError()
|
def get_environ(self):
    """Return a new environ dict targeting the given wsgi.version."""
    # BUG FIX: `raise NotImplemented` raises TypeError at runtime —
    # NotImplemented is a sentinel value, not an exception class.
    raise NotImplementedError()
|
def respond(self):
    """Process the current request by running the WSGI app and writing
    each non-empty chunk it yields."""
    response = self.req.server.wsgi_app(self.env, self.start_response)
    try:
        for chunk in response:
            # empty chunks are skipped so headers are only sent once a
            # non-empty chunk arrives
            if chunk:
                if isinstance(chunk, unicodestr):
                    chunk = chunk.encode('ISO-8859-1')
                self.write(chunk)
    finally:
        # close the app's iterable even if iteration failed partway
        if hasattr(response, 'close'):
            response.close()
|
def start_response(self, status, headers, exc_info=None):
    """WSGI callable to begin the HTTP response."""
    # calling start_response a second time is only allowed with exc_info
    if self.started_response and not exc_info:
        raise AssertionError('WSGI start_response called a second time with no exc_info.')
    self.started_response = True

    # if headers already went out, re-raise the provided exception
    if self.req.sent_headers:
        try:
            raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
        finally:
            exc_info = None

    if not isinstance(status, str):
        raise TypeError('WSGI response status is not of type str.')
    self.req.status = status.encode('ISO-8859-1')

    for k, v in headers:
        if not isinstance(k, str):
            raise TypeError('WSGI response header key %r is not of type str.' % k)
        if not isinstance(v, str):
            raise TypeError('WSGI response header value %r is not of type str.' % v)
        if k.lower() == 'content-length':
            # remember the declared length so write() can police it
            self.remaining_bytes_out = int(v)
        self.req.outheaders.append((k.encode('ISO-8859-1'),
                                    v.encode('ISO-8859-1')))

    return self.write
|
def write(self, chunk):
    """WSGI callable to write unbuffered data to the client.

    Also used internally by start_response to write data from the
    iterable returned by the WSGI application.
    """
    if not self.started_response:
        raise AssertionError('WSGI write called before start_response.')

    chunklen = len(chunk)
    rbo = self.remaining_bytes_out
    if rbo is not None and chunklen > rbo:
        if not self.req.sent_headers:
            # headers not sent yet: we can still report the error cleanly
            self.req.simple_response('500 Internal Server Error',
                                     'The requested resource returned more bytes than the declared Content-Length.')
        else:
            # data already sent: truncate to fit; error raised below
            chunk = chunk[:rbo]

    if not self.req.sent_headers:
        self.req.sent_headers = True
        self.req.send_headers()

    self.req.write(chunk)

    if rbo is not None:
        rbo -= chunklen
        if rbo < 0:
            raise ValueError('Response body exceeds the declared Content-Length.')
|
def get_environ(self):
    """Return a new environ dict targeting the given wsgi.version."""
    req = self.req
    env = {
        'ACTUAL_SERVER_PROTOCOL': req.server.protocol,
        'PATH_INFO': req.path.decode('ISO-8859-1'),
        'QUERY_STRING': req.qs.decode('ISO-8859-1'),
        'REMOTE_ADDR': req.conn.remote_addr or '',
        'REMOTE_PORT': str(req.conn.remote_port or ''),
        'REQUEST_METHOD': req.method.decode('ISO-8859-1'),
        'REQUEST_URI': req.uri.decode('ISO-8859-1'),
        'SCRIPT_NAME': '',
        'SERVER_NAME': req.server.server_name,
        'SERVER_PROTOCOL': req.request_protocol.decode('ISO-8859-1'),
        'SERVER_SOFTWARE': req.server.software,
        'wsgi.errors': sys.stderr,
        'wsgi.input': req.rfile,
        'wsgi.multiprocess': False,
        'wsgi.multithread': True,
        'wsgi.run_once': False,
        'wsgi.url_scheme': req.scheme.decode('ISO-8859-1'),
        'wsgi.version': (1, 0),
    }

    if isinstance(req.server.bind_addr, basestring):
        # string bind_addr means a unix-domain socket: no port number
        env['SERVER_PORT'] = ''
    else:
        env['SERVER_PORT'] = str(req.server.bind_addr[1])

    # request headers become HTTP_* entries
    for k, v in req.inheaders.items():
        k = k.decode('ISO-8859-1').upper().replace('-', '_')
        env['HTTP_' + k] = v.decode('ISO-8859-1')

    # CONTENT_TYPE/CONTENT_LENGTH have no HTTP_ prefix in WSGI
    ct = env.pop('HTTP_CONTENT_TYPE', None)
    if ct is not None:
        env['CONTENT_TYPE'] = ct
    cl = env.pop('HTTP_CONTENT_LENGTH', None)
    if cl is not None:
        env['CONTENT_LENGTH'] = cl

    if req.conn.ssl_env:
        env.update(req.conn.ssl_env)
    return env
|
def get_environ(self):
    """Return a new environ dict targeting the given wsgi.version."""
    req = self.req
    env_10 = WSGIGateway_10.get_environ(self)
    env = env_10.copy()
    env['wsgi.version'] = ('u', 0)
    env.setdefault('wsgi.url_encoding', 'utf-8')
    try:
        # try decoding the raw path/query with the preferred encoding
        env['PATH_INFO'] = req.path.decode(env['wsgi.url_encoding'])
        env['QUERY_STRING'] = req.qs.decode(env['wsgi.url_encoding'])
    except UnicodeDecodeError:
        # fall back to the ISO-8859-1 values from the 1.0 environ
        env['wsgi.url_encoding'] = 'ISO-8859-1'
        env['PATH_INFO'] = env_10['PATH_INFO']
        env['QUERY_STRING'] = env_10['QUERY_STRING']
    return env
|
def bind(self, sock):
    """Wrap and return the given socket (no wrapping in this adapter)."""
    return sock
|
def wrap(self, sock):
    """Wrap and return the given socket, plus WSGI environ entries."""
    try:
        if self.context is not None:
            s = self.context.wrap_socket(sock, do_handshake_on_connect=True,
                                         server_side=True)
        else:
            s = ssl.wrap_socket(sock, do_handshake_on_connect=True,
                                server_side=True,
                                certfile=self.certificate,
                                keyfile=self.private_key,
                                ssl_version=ssl.PROTOCOL_SSLv23,
                                ca_certs=self.certificate_chain)
    except ssl.SSLError:
        e = sys.exc_info()[1]
        if e.errno == ssl.SSL_ERROR_EOF:
            # EOF during the handshake — treat as no connection
            return None, {}
        if e.errno == ssl.SSL_ERROR_SSL:
            if e.args[1].endswith('http request'):
                # the client is speaking plain HTTP to an HTTPS server
                raise wsgiserver.NoSSLError
            if e.args[1].endswith('unknown protocol'):
                # some non-HTTP protocol; drop the connection
                return None, {}
        raise
    return s, self.get_environ(s)
|
def get_environ(self, sock):
    """Create WSGI environ entries to be merged into each request."""
    cipher = sock.cipher()
    # cipher() gives (cipher_name, protocol_version, secret_bits)
    return {
        'wsgi.url_scheme': 'https',
        'HTTPS': 'on',
        'SSL_PROTOCOL': cipher[1],
        'SSL_CIPHER': cipher[0],
    }
|
def _safe_call(self, is_reader, call, *args, **kwargs):
    """Wrap the given call with SSL error-trapping.

    is_reader: if False EOF errors will be raised. If True, EOF errors
    will return "" (to emulate normal sockets).
    """
    start = time.time()
    while True:
        try:
            return call(*args, **kwargs)
        except SSL.WantReadError:
            # retry after a short sleep
            time.sleep(self.ssl_retry)
        except SSL.WantWriteError:
            time.sleep(self.ssl_retry)
        except SSL.SysCallError as e:
            if is_reader and e.args == (-1, 'Unexpected EOF'):
                return ''
            errnum = e.args[0]
            if is_reader and errnum in wsgiserver.socket_errors_to_ignore:
                return ''
            raise socket.error(errnum)
        except SSL.Error as e:
            if is_reader and e.args == (-1, 'Unexpected EOF'):
                return ''
            # dig out the third element of the first error tuple, if any
            thirdarg = None
            try:
                thirdarg = e.args[0][0][2]
            except IndexError:
                pass
            if thirdarg == 'http request':
                # the client is speaking HTTP to an HTTPS server
                raise wsgiserver.NoSSLError()
            raise wsgiserver.FatalSSLAlert(*e.args)
        # only reached after a retryable error above
        if time.time() - start > self.ssl_timeout:
            raise socket.timeout('timed out')
|
def bind(self, sock):
    """Wrap and return the given socket."""
    if self.context is None:
        self.context = self.get_context()
    wrapped = SSLConnection(self.context, sock)
    # precompute the environ template handed out by wrap()
    self._environ = self.get_environ()
    return wrapped
|
def wrap(self, sock):
    """Wrap and return the given socket, plus WSGI environ entries."""
    # hand back a copy so per-request mutation can't corrupt the template
    return sock, self._environ.copy()
|
def get_context(self):
    """Return an SSL.Context built from self attributes."""
    ctx = SSL.Context(SSL.SSLv23_METHOD)
    ctx.use_privatekey_file(self.private_key)
    if self.certificate_chain:
        ctx.load_verify_locations(self.certificate_chain)
    ctx.use_certificate_file(self.certificate)
    return ctx
|
def get_environ(self):
    """Return WSGI environ entries to be merged into each request.

    When a certificate is configured, its version, serial number and
    issuer/subject DN components are exposed as SSL_SERVER_* entries.
    """
    ssl_environ = {'HTTPS': 'on'}
    if self.certificate:
        # BUG FIX: the certificate file was opened and never closed;
        # use a context manager so the handle is released promptly.
        with open(self.certificate, 'rb') as f:
            cert = crypto.load_certificate(crypto.FILETYPE_PEM, f.read())
        ssl_environ.update({
            'SSL_SERVER_M_VERSION': cert.get_version(),
            'SSL_SERVER_M_SERIAL': cert.get_serial_number(),
        })
        for prefix, dn in [('I', cert.get_issuer()), ('S', cert.get_subject())]:
            # str(X509Name) wraps the DN; slice off the wrapper text
            dnstr = str(dn)[18:-2]
            wsgikey = 'SSL_SERVER_%s_DN' % prefix
            ssl_environ[wsgikey] = dnstr
            # parse /k1=v1/k2=v2... from the right so '/' inside a value
            # (e.g. in a URL) doesn't split a component
            while dnstr:
                pos = dnstr.rfind('=')
                dnstr, value = dnstr[:pos], dnstr[pos + 1:]
                pos = dnstr.rfind('/')
                dnstr, key = dnstr[:pos], dnstr[pos + 1:]
                if key and value:
                    wsgikey = 'SSL_SERVER_%s_DN_%s' % (prefix, key)
                    ssl_environ[wsgikey] = value
    return ssl_environ
|
def parse_request(self):
    """Parse the next HTTP request start-line and message-headers."""
    self.rfile = SizeCheckWrapper(self.conn.rfile,
                                  self.server.max_request_header_size)
    try:
        ok = self.read_request_line()
    except MaxSizeExceeded:
        self.simple_response('414 Request-URI Too Long',
                             'The Request-URI sent with the request exceeds the maximum allowed bytes.')
        return
    if not ok:
        return

    try:
        ok = self.read_request_headers()
    except MaxSizeExceeded:
        self.simple_response('413 Request Entity Too Large',
                             'The headers sent with the request exceed the maximum allowed bytes.')
        return
    if not ok:
        return

    self.ready = True
|
def read_request_headers(self):
    """Read self.rfile into self.inheaders. Return success."""
    try:
        read_headers(self.rfile, self.inheaders)
    except ValueError:
        ex = sys.exc_info()[1]
        self.simple_response('400 Bad Request', ex.args[0])
        return False

    mrbs = self.server.max_request_body_size
    if mrbs and int(self.inheaders.get('Content-Length', 0)) > mrbs:
        self.simple_response('413 Request Entity Too Large',
                             'The entity sent with the request exceeds the maximum allowed bytes.')
        return False

    # BUG FIX: persistence decision.  The original folded both protocol
    # branches under HTTP/1.1, closing *every* HTTP/1.1 connection that
    # lacked "Connection: Keep-Alive" — defeating 1.1's default
    # keep-alive.  HTTP/1.1 closes only on an explicit "close"; older
    # protocols close unless the client asked for Keep-Alive.
    if self.response_protocol == 'HTTP/1.1':
        if self.inheaders.get('Connection', '') == 'close':
            self.close_connection = True
    else:
        if self.inheaders.get('Connection', '') != 'Keep-Alive':
            self.close_connection = True

    # Transfer-Encoding support (HTTP/1.1 only)
    te = None
    if self.response_protocol == 'HTTP/1.1':
        te = self.inheaders.get('Transfer-Encoding')
        if te:
            te = [x.strip().lower() for x in te.split(',') if x.strip()]
    self.chunked_read = False
    if te:
        for enc in te:
            if enc == 'chunked':
                self.chunked_read = True
            else:
                # Unsupported transfer-coding
                self.simple_response('501 Unimplemented')
                self.close_connection = True
                return False

    if self.inheaders.get('Expect', '') == '100-continue':
        msg = self.server.protocol + ' 100 Continue\r\n\r\n'
        try:
            self.conn.wfile.sendall(msg)
        except socket.error:
            x = sys.exc_info()[1]
            if x.args[0] not in socket_errors_to_ignore:
                raise
    return True
|
def parse_request_uri(self, uri):
    """Parse a Request-URI into (scheme, authority, path).

    Per RFC 2616, a Request-URI is one of "*", an absoluteURI, an
    abs_path, or an authority.  A URI beginning with "//" is therefore
    never a net_path; it is an abs_path whose first segment is empty.
    """
    if uri == ASTERISK:
        # "*" form (e.g. "OPTIONS * HTTP/1.1"): no scheme or authority.
        return (None, None, uri)
    (scheme, authority, path, params, query, fragment) = urlparse(uri)
    if scheme and (QUESTION_MARK not in scheme):
        # absoluteURI form.
        return (scheme, authority, path)
    if not uri.startswith(FORWARD_SLASH):
        # authority form (e.g. CONNECT host:port).
        return (None, uri, None)
    # abs_path form.
    return (None, None, uri)
|
'Call the gateway and write its iterable output.'
| def respond(self):
| mrbs = self.server.max_request_body_size
# Pick the body reader: chunk-decoding or fixed Content-Length.
if self.chunked_read:
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
else:
cl = int(self.inheaders.get('Content-Length', 0))
if (mrbs and (mrbs < cl)):
if (not self.sent_headers):
self.simple_response('413 Request Entity Too Large', 'The entity sent with the request exceeds the maximum allowed bytes.')
return
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
# Hand off to the (e.g. WSGI) gateway to produce the response body.
self.server.gateway(self).respond()
if (self.ready and (not self.sent_headers)):
self.sent_headers = True
self.send_headers()
if self.chunked_write:
# Terminating zero-length chunk for chunked responses.
self.conn.wfile.sendall('0\r\n\r\n')
|
'Write a simple response back to the client.'
| def simple_response(self, status, msg=''):
| status = str(status)
buf = [(((self.server.protocol + SPACE) + status) + CRLF), ('Content-Length: %s\r\n' % len(msg)), 'Content-Type: text/plain\r\n']
if (status[:3] in ('413', '414')):
# Request was too large: drop the connection once this is sent.
self.close_connection = True
if (self.response_protocol == 'HTTP/1.1'):
buf.append('Connection: close\r\n')
else:
# HTTP/1.0 clients may not understand 413/414; downgrade to 400.
status = '400 Bad Request'
buf.append(CRLF)
if msg:
if isinstance(msg, unicodestr):
# Headers/body here are Latin-1 on the wire.
msg = msg.encode('ISO-8859-1')
buf.append(msg)
try:
self.conn.wfile.sendall(''.join(buf))
except socket.error:
x = sys.exc_info()[1]
if (x.args[0] not in socket_errors_to_ignore):
raise
|
def write(self, chunk):
    'Write unbuffered data to the client.'
    if not (self.chunked_write and chunk):
        self.conn.wfile.sendall(chunk)
        return
    # Frame per RFC 2616 3.6.1: hex size line, CRLF, data, CRLF.
    frame = EMPTY.join((hex(len(chunk))[2:], CRLF, chunk, CRLF))
    self.conn.wfile.sendall(frame)
|
'Assert, process, and send the HTTP response message-headers.
You must set self.status, and self.outheaders before calling this.'
| def send_headers(self):
| hkeys = [key.lower() for (key, value) in self.outheaders]
status = int(self.status[:3])
if (status == 413):
self.close_connection = True
elif ('content-length' not in hkeys):
# No Content-Length: 1xx/204/205/304 have no body by definition;
# otherwise chunk under HTTP/1.1 or close the connection to mark EOF.
if ((status < 200) or (status in (204, 205, 304))):
pass
elif ((self.response_protocol == 'HTTP/1.1') and (self.method != 'HEAD')):
self.chunked_write = True
self.outheaders.append(('Transfer-Encoding', 'chunked'))
else:
self.close_connection = True
if ('connection' not in hkeys):
if (self.response_protocol == 'HTTP/1.1'):
if self.close_connection:
self.outheaders.append(('Connection', 'close'))
elif (not self.close_connection):
self.outheaders.append(('Connection', 'Keep-Alive'))
if ((not self.close_connection) and (not self.chunked_read)):
# Drain any unread request body so the next pipelined request
# starts at the right byte.
remaining = getattr(self.rfile, 'remaining', 0)
if (remaining > 0):
self.rfile.read(remaining)
if ('date' not in hkeys):
self.outheaders.append(('Date', email.utils.formatdate()))
if ('server' not in hkeys):
self.outheaders.append(('Server', self.server.server_name))
buf = [(((self.server.protocol + SPACE) + self.status) + CRLF)]
for (k, v) in self.outheaders:
buf.append(((((k + COLON) + SPACE) + v) + CRLF))
buf.append(CRLF)
self.conn.wfile.sendall(EMPTY.join(buf))
|
def sendall(self, data):
    'Sendall for non-blocking sockets.'
    remaining = data
    while remaining:
        try:
            sent = self.send(remaining)
        except socket.error as exc:
            # EAGAIN/EWOULDBLOCK etc.: buffer full, retry the same data.
            if exc.args[0] not in socket_errors_nonblocking:
                raise
        else:
            remaining = remaining[sent:]
|
'Read each request and respond appropriately.'
| def communicate(self):
| request_seen = False
try:
# Keep-alive loop: keep parsing/serving requests on this
# connection until the client or handler asks to close.
while True:
req = None
req = self.RequestHandlerClass(self.server, self)
req.parse_request()
if self.server.stats['Enabled']:
self.requests_seen += 1
if (not req.ready):
# EOF or unparsable request: just close the connection.
return
request_seen = True
req.respond()
if req.close_connection:
return
except socket.error:
e = sys.exc_info()[1]
errnum = e.args[0]
# Timeouts surface as string args on some platforms.
if ((errnum == 'timed out') or (errnum == 'The read operation timed out')):
# Only 408 if the client actually started a request.
if ((not request_seen) or (req and req.started_request)):
if (req and (not req.sent_headers)):
try:
req.simple_response('408 Request Timeout')
except FatalSSLAlert:
return
elif (errnum not in socket_errors_to_ignore):
self.server.error_log(('socket.error %s' % repr(errnum)), level=logging.WARNING, traceback=True)
if (req and (not req.sent_headers)):
try:
req.simple_response('500 Internal Server Error')
except FatalSSLAlert:
return
return
except (KeyboardInterrupt, SystemExit):
raise
except FatalSSLAlert:
return
except NoSSLError:
# Plain HTTP hit an HTTPS port: answer over the raw socket.
if (req and (not req.sent_headers)):
self.wfile = CP_fileobject(self.socket._sock, 'wb', self.wbufsize)
req.simple_response('400 Bad Request', 'The client sent a plain HTTP request, but this server only speaks HTTPS on this port.')
self.linger = True
except Exception:
e = sys.exc_info()[1]
self.server.error_log(repr(e), level=logging.ERROR, traceback=True)
if (req and (not req.sent_headers)):
try:
req.simple_response('500 Internal Server Error')
except FatalSSLAlert:
return
|
def close(self):
    'Close the socket underlying this connection.'
    self.rfile.close()
    if self.linger:
        # Lingering close: leave the socket open so buffered response
        # data can still drain to the client.
        return
    # Close the raw OS socket first (when wrapped), then the wrapper.
    if hasattr(self.socket, '_sock'):
        self.socket._sock.close()
    self.socket.close()
|
'Start the pool of threads.'
| def start(self):
# Spawn self.min workers, then start them all.
| for i in range(self.min):
self._threads.append(WorkerThread(self.server))
for worker in self._threads:
worker.setName(('CP Server ' + worker.getName()))
worker.start()
# Block until every worker has signalled readiness.
for worker in self._threads:
while (not worker.ready):
time.sleep(0.1)
|
'Number of worker threads which are idle. Read-only.'
| def _get_idle(self):
| return len([t for t in self._threads if (t.conn is None)])
|
def grow(self, amount):
    'Spawn new worker threads (not above self.max).'
    # A non-positive max means "unbounded pool".
    if self.max <= 0:
        budget = float('inf')
    else:
        budget = max(self.max - len(self._threads), 0)
    workers = [self._spawn_worker() for _ in range(min(amount, budget))]
    # Wait until every new worker reports ready before exposing it.
    while not self._all(operator.attrgetter('ready'), workers):
        time.sleep(0.1)
    self._threads.extend(workers)
|
def shrink(self, amount):
    'Kill off worker threads (not below self.min).'
    # Fix: the original removed items from self._threads while iterating
    # it, which skips the element after each removal; iterate a snapshot.
    for t in list(self._threads):
        if not t.isAlive():
            self._threads.remove(t)
            amount -= 1
    # Never shrink below self.min live workers.
    n_extra = max(len(self._threads) - self.min, 0)
    n_to_remove = min(amount, n_extra)
    # Each sentinel on the queue retires one worker.
    for n in range(n_to_remove):
        self._queue.put(_SHUTDOWNREQUEST)
|
'Run the server forever.'
| def start(self):
| self._interrupt = None
if (self.software is None):
self.software = ('%s Server' % self.version)
# A string bind_addr means an AF_UNIX socket path.
if isinstance(self.bind_addr, basestring):
try:
os.unlink(self.bind_addr)
except:
pass
try:
# 511 == 0o777: world-accessible socket file.
os.chmod(self.bind_addr, 511)
except:
pass
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, '', self.bind_addr)]
else:
(host, port) = self.bind_addr
try:
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
except socket.gaierror:
# Resolution failed: guess the family from the address text.
if (':' in self.bind_addr[0]):
info = [(socket.AF_INET6, socket.SOCK_STREAM, 0, '', (self.bind_addr + (0, 0)))]
else:
info = [(socket.AF_INET, socket.SOCK_STREAM, 0, '', self.bind_addr)]
self.socket = None
msg = 'No socket could be created'
# Try each candidate address until one binds.
for res in info:
(af, socktype, proto, canonname, sa) = res
try:
self.bind(af, socktype, proto)
except socket.error as serr:
msg = ('%s -- (%s: %s)' % (msg, sa, serr))
if self.socket:
self.socket.close()
self.socket = None
continue
break
if (not self.socket):
raise socket.error(msg)
# 1-second accept timeout so the loop can notice self.ready going False.
self.socket.settimeout(1)
self.socket.listen(self.request_queue_size)
self.requests.start()
self.ready = True
self._start_time = time.time()
while self.ready:
try:
self.tick()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.error_log('Error in HTTPServer.tick', level=logging.ERROR, traceback=True)
if self.interrupt:
# Wait for the interrupt to settle to a real exception, then raise.
while (self.interrupt is True):
time.sleep(0.1)
if self.interrupt:
raise self.interrupt
|
'Create (or recreate) the actual socket object.'
| def bind(self, family, type, proto=0):
| self.socket = socket.socket(family, type, proto)
prevent_socket_inheritance(self.socket)
# Allow quick restarts on the same port.
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
if (self.nodelay and (not isinstance(self.bind_addr, str))):
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
if (self.ssl_adapter is not None):
self.socket = self.ssl_adapter.bind(self.socket)
# Dual-stack: let an IPv6 wildcard socket also accept IPv4 clients.
if (hasattr(socket, 'AF_INET6') and (family == socket.AF_INET6) and (self.bind_addr[0] in ('::', '::0', '::0.0.0.0'))):
try:
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
except (AttributeError, socket.error):
pass
self.socket.bind(self.bind_addr)
|
'Accept a new connection and put it on the Queue.'
| def tick(self):
| try:
(s, addr) = self.socket.accept()
if self.stats['Enabled']:
self.stats['Accepts'] += 1
if (not self.ready):
return
prevent_socket_inheritance(s)
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
makefile = CP_fileobject
ssl_env = {}
if (self.ssl_adapter is not None):
try:
(s, ssl_env) = self.ssl_adapter.wrap(s)
except NoSSLError:
# Plain HTTP on an HTTPS port: answer 400 over the raw socket.
msg = 'The client sent a plain HTTP request, but this server only speaks HTTPS on this port.'
buf = [('%s 400 Bad Request\r\n' % self.protocol), ('Content-Length: %s\r\n' % len(msg)), 'Content-Type: text/plain\r\n\r\n', msg]
wfile = makefile(s._sock, 'wb', DEFAULT_BUFFER_SIZE)
try:
wfile.sendall(''.join(buf))
except socket.error:
x = sys.exc_info()[1]
if (x.args[0] not in socket_errors_to_ignore):
raise
return
if (not s):
return
makefile = self.ssl_adapter.makefile
# Re-apply the timeout: the SSL wrap may have replaced the socket.
if hasattr(s, 'settimeout'):
s.settimeout(self.timeout)
conn = self.ConnectionClass(self, s, makefile)
if (not isinstance(self.bind_addr, basestring)):
# AF_UNIX peers have no address; fake a wildcard one.
if (addr is None):
if (len(s.getsockname()) == 2):
addr = ('0.0.0.0', 0)
else:
addr = ('::', 0)
conn.remote_addr = addr[0]
conn.remote_port = addr[1]
conn.ssl_env = ssl_env
try:
self.requests.put(conn)
except queue.Full:
# Worker queue saturated: drop the connection.
conn.close()
return
except socket.timeout:
# Accept timed out (see the 1s settimeout); just loop again.
return
except socket.error:
x = sys.exc_info()[1]
if self.stats['Enabled']:
self.stats['Socket Errors'] += 1
if (x.args[0] in socket_error_eintr):
return
if (x.args[0] in socket_errors_nonblocking):
return
if (x.args[0] in socket_errors_to_ignore):
return
raise
|
'Gracefully shutdown a server that is serving forever.'
| def stop(self):
| self.ready = False
if (self._start_time is not None):
self._run_time += (time.time() - self._start_time)
self._start_time = None
sock = getattr(self, 'socket', None)
if sock:
if (not isinstance(self.bind_addr, basestring)):
try:
(host, port) = sock.getsockname()[:2]
except socket.error:
x = sys.exc_info()[1]
if (x.args[0] not in socket_errors_to_ignore):
raise
else:
# Poke the listening socket with a dummy connection so the
# accept loop wakes up and notices self.ready is False.
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM):
(af, socktype, proto, canonname, sa) = res
s = None
try:
s = socket.socket(af, socktype, proto)
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
if hasattr(sock, 'close'):
sock.close()
self.socket = None
self.requests.stop(self.shutdown_timeout)
|
def respond(self):
    'Process the current request. Must be overridden in a subclass.'
    # Fix: ``raise NotImplemented`` raised the NotImplemented constant,
    # which is not an exception (a TypeError under Python 3); raise the
    # proper NotImplementedError instead.
    raise NotImplementedError
|
def get_environ(self):
    'Return a new environ dict targeting the given wsgi.version'
    # Fix: ``raise NotImplemented`` raised the NotImplemented constant,
    # which is not an exception (a TypeError under Python 3); raise the
    # proper NotImplementedError instead.
    raise NotImplementedError
|
def respond(self):
    'Process the current request.'
    response = self.req.server.wsgi_app(self.env, self.start_response)
    try:
        for chunk in response:
            # WSGI apps may yield empty strings; skip them.
            if not chunk:
                continue
            if isinstance(chunk, unicodestr):
                # Tolerate sloppy apps that yield Latin-1-safe unicode.
                chunk = chunk.encode('ISO-8859-1')
            self.write(chunk)
    finally:
        # Per PEP 333, close() must be called if the iterable has one.
        if hasattr(response, 'close'):
            response.close()
|
'WSGI callable to begin the HTTP response.'
| def start_response(self, status, headers, exc_info=None):
# Per PEP 333, a second call is only legal with exc_info.
| if (self.started_response and (not exc_info)):
raise AssertionError('WSGI start_response called a second time with no exc_info.')
self.started_response = True
if self.req.sent_headers:
# Headers already on the wire: re-raise the app's error
# (Python 2 three-argument raise syntax).
try:
raise exc_info[0], exc_info[1], exc_info[2]
finally:
exc_info = None
self.req.status = status
for (k, v) in headers:
if (not isinstance(k, str)):
raise TypeError(('WSGI response header key %r is not of type str.' % k))
if (not isinstance(v, str)):
raise TypeError(('WSGI response header value %r is not of type str.' % v))
if (k.lower() == 'content-length'):
# Track the declared length so write() can police the body size.
self.remaining_bytes_out = int(v)
self.req.outheaders.extend(headers)
return self.write
|
'WSGI callable to write unbuffered data to the client.
This method is also used internally by start_response (to write
data from the iterable returned by the WSGI application).'
| def write(self, chunk):
| if (not self.started_response):
raise AssertionError('WSGI write called before start_response.')
chunklen = len(chunk)
rbo = self.remaining_bytes_out
if ((rbo is not None) and (chunklen > rbo)):
if (not self.req.sent_headers):
# NOTE(review): upstream there appears to be no return here, so
# the over-long chunk is still written after the 500 — confirm
# against the original before "fixing".
self.req.simple_response('500 Internal Server Error', 'The requested resource returned more bytes than the declared Content-Length.')
else:
# Headers already sent: silently truncate to the declared length.
chunk = chunk[:rbo]
if (not self.req.sent_headers):
self.req.sent_headers = True
self.req.send_headers()
self.req.write(chunk)
if (rbo is not None):
rbo -= chunklen
if (rbo < 0):
raise ValueError('Response body exceeds the declared Content-Length.')
|
'Return a new environ dict targeting the given wsgi.version'
| def get_environ(self):
| req = self.req
# Base CGI/WSGI 1.0 environ assembled from the parsed request.
env = {'ACTUAL_SERVER_PROTOCOL': req.server.protocol, 'PATH_INFO': req.path, 'QUERY_STRING': req.qs, 'REMOTE_ADDR': (req.conn.remote_addr or ''), 'REMOTE_PORT': str((req.conn.remote_port or '')), 'REQUEST_METHOD': req.method, 'REQUEST_URI': req.uri, 'SCRIPT_NAME': '', 'SERVER_NAME': req.server.server_name, 'SERVER_PROTOCOL': req.request_protocol, 'SERVER_SOFTWARE': req.server.software, 'wsgi.errors': sys.stderr, 'wsgi.input': req.rfile, 'wsgi.multiprocess': False, 'wsgi.multithread': True, 'wsgi.run_once': False, 'wsgi.url_scheme': req.scheme, 'wsgi.version': (1, 0)}
if isinstance(req.server.bind_addr, basestring):
# AF_UNIX sockets have no port number.
env['SERVER_PORT'] = ''
else:
env['SERVER_PORT'] = str(req.server.bind_addr[1])
for (k, v) in req.inheaders.iteritems():
env[('HTTP_' + k.upper().replace('-', '_'))] = v
# CGI spec: Content-Type/Length are bare, not HTTP_-prefixed.
ct = env.pop('HTTP_CONTENT_TYPE', None)
if (ct is not None):
env['CONTENT_TYPE'] = ct
cl = env.pop('HTTP_CONTENT_LENGTH', None)
if (cl is not None):
env['CONTENT_LENGTH'] = cl
if req.conn.ssl_env:
env.update(req.conn.ssl_env)
return env
|
'Return a new environ dict targeting the given wsgi.version'
| def get_environ(self):
| req = self.req
# Build the byte-string WSGI 1.0 environ, then decode it for wsgi u.0.
env_10 = WSGIGateway_10.get_environ(self)
env = dict([(k.decode('ISO-8859-1'), v) for (k, v) in env_10.iteritems()])
env[u'wsgi.version'] = ('u', 0)
env.setdefault(u'wsgi.url_encoding', u'utf-8')
try:
for key in [u'PATH_INFO', u'SCRIPT_NAME', u'QUERY_STRING']:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
except UnicodeDecodeError:
# Not valid UTF-8: fall back to Latin-1, which cannot fail.
env[u'wsgi.url_encoding'] = u'ISO-8859-1'
for key in [u'PATH_INFO', u'SCRIPT_NAME', u'QUERY_STRING']:
env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
for (k, v) in sorted(env.items()):
if (isinstance(v, str) and (k not in ('REQUEST_URI', 'wsgi.input'))):
env[k] = v.decode('ISO-8859-1')
return env
|
@api.model
def load(self, fields, data):
    """Restrict CSV import to users in the allowed group.

    The import proceeds when the current user belongs to the CSV-import
    group (or the group is absent); otherwise the attempt is logged and
    an error result is returned instead of importing anything.
    """
    allowed_group = 'base_import_security_group.group_import_csv'
    group = self.env.ref(allowed_group, raise_if_not_found=False)
    if (not group) or self.env.user.has_group(allowed_group):
        return super(Base, self).load(fields=fields, data=data)
    msg = ('User (ID: %s) is not allowed to import data in model %s.'
           % (self.env.uid, self._name))
    _logger.info(msg)
    error = dict(type='error', message=msg, moreinfo=None)
    return {'ids': None, 'messages': [error]}
|
'Verify that the button is either visible or invisible.
After the adjacent button is loaded, allow for a second for
the asynchronous call to finish and update the visibility'
| def has_button_import(self, falsify=False, user=None):
# Browser-side probe: logs 'ok'/'error' depending on whether the
# import button is present (negated when falsify=True).
| code = ("\n window.setTimeout(function () {\n if (%s$('.o_button_import').length) {\n console.log('ok');\n } else {\n console.log('error');\n };\n }, 1000);\n " % ('!' if falsify else ''))
action = self.env.ref('base.action_partner_category_form').id
link = ('/web#action=%s' % action)
# Wait for the list view's Create button before running the probe.
self.phantom_js(link, code, "$('button.o_list_button_add').length", login=user.login)
|
'Admin user can import data, but the demo user cannot'
| def test_01_load(self):
| fields = ('id', 'name', 'perm_read', 'perm_write', 'perm_create', 'perm_unlink')
data = [('access_res_users_test', 'res.users test', '1', '0', '0', '0'), ('access_res_users_test2', 'res.users test2', '1', '1', '1', '1')]
# Allowed user: button visible; load reaches validation (the rows
# lack model_id, so the import itself reports two errors).
self.has_button_import(user=self.env.user)
res = self.Access.load(fields, data)
self.assertEqual(res['ids'], False)
self.assertEqual(len(res['messages']), 2)
self.assertEqual(res['messages'][0]['message'], "Missing required value for the field 'Object' (model_id)")
self.assertEqual(res['messages'][1]['message'], "Missing required value for the field 'Object' (model_id)")
# Disallowed user: button hidden; load is refused outright.
self.has_button_import(falsify=True, user=self.user_test)
res2 = self.Access.sudo(self.user_test).load(fields, data)
self.assertEqual(res2['ids'], None)
self.assertEqual(len(res2['messages']), 1)
self.assertEqual(res2['messages'][0]['message'], ('User (ID: %s) is not allowed to import data in model ir.model.access.' % self.user_test.id))
|
@api.model
def _default_folder(self):
    'Default to ``backups`` folder inside current server datadir.'
    # One sub-folder per database name, under <data_dir>/backups.
    db_name = self.env.cr.dbname
    return os.path.join(tools.config['data_dir'], 'backups', db_name)
|
@api.multi
@api.depends('folder', 'method', 'sftp_host', 'sftp_port', 'sftp_user')
def _compute_name(self):
    'Get the right summary for this job.'
    for record in self:
        if record.method == 'sftp':
            record.name = ('sftp://%s@%s:%d%s' % (
                record.sftp_user, record.sftp_host,
                record.sftp_port, record.folder))
        elif record.method == 'local':
            record.name = ('%s @ localhost' % record.folder)
|
@api.multi
@api.constrains('folder', 'method')
def _check_folder(self):
    'Do not use the filestore or you will backup your backups.'
    for record in self:
        # A local backup folder inside the filestore would recursively
        # include earlier backups in later ones.
        is_local = (record.method == 'local')
        if is_local and record.folder.startswith(
                tools.config.filestore(self.env.cr.dbname)):
            raise exceptions.ValidationError(_('Do not save backups on your filestore, or you will backup your backups too!'))
|
'Check if the SFTP settings are correct.'
| @api.multi
def action_sftp_test_connection(self):
# Success is deliberately reported via exceptions.Warning so the
# result pops up in the UI.
| try:
with self.sftp_connection():
raise exceptions.Warning(_('Connection Test Succeeded!'))
except (pysftp.CredentialException, pysftp.ConnectionException, pysftp.SSHException):
_logger.info('Connection Test Failed!', exc_info=True)
raise exceptions.Warning(_('Connection Test Failed!'))
|
'Run selected backups.'
| @api.multi
def action_backup(self):
# The first dump is cached on disk and reused for every other target
# so the database is only dumped once per run.
| backup = None
filename = self.filename(datetime.now())
successful = self.browse()
for rec in self.filtered((lambda r: (r.method == 'local'))):
with rec.backup_log():
try:
os.makedirs(rec.folder)
except OSError:
# Folder already exists.
pass
with open(os.path.join(rec.folder, filename), 'wb') as destiny:
if backup:
with open(backup) as cached:
shutil.copyfileobj(cached, destiny)
else:
db.dump_db(self.env.cr.dbname, destiny)
backup = (backup or destiny.name)
successful |= rec
sftp = self.filtered((lambda r: (r.method == 'sftp')))
if sftp:
if backup:
cached = open(backup)
else:
# No local job produced a dump; create an in-memory one.
cached = db.dump_db(self.env.cr.dbname, None)
with cached:
for rec in sftp:
with rec.backup_log():
with rec.sftp_connection() as remote:
try:
remote.makedirs(rec.folder)
except pysftp.ConnectionException:
# Remote folder already exists.
pass
with remote.open(os.path.join(rec.folder, filename), 'wb') as destiny:
shutil.copyfileobj(cached, destiny)
successful |= rec
# Prune old backups only for jobs that succeeded this run.
successful.cleanup()
|
@api.model
def action_backup_all(self):
    'Run all scheduled backups.'
    all_jobs = self.search([])
    return all_jobs.action_backup()
|
'Log a backup result.'
| @api.multi
@contextmanager
def backup_log(self):
# Context manager around a single backup attempt: logs the outcome
# and posts a chatter message; failures are swallowed after logging
# so remaining jobs still run.
| try:
_logger.info('Starting database backup: %s', self.name)
(yield)
except:
_logger.exception('Database backup failed: %s', self.name)
escaped_tb = tools.html_escape(traceback.format_exc())
self.message_post(('<p>%s</p><pre>%s</pre>' % (_('Database backup failed.'), escaped_tb)), subtype=self.env.ref('auto_backup.mail_message_subtype_failure'))
else:
_logger.info('Database backup succeeded: %s', self.name)
self.message_post(_('Database backup succeeded.'))
|
'Clean up old backups.'
| @api.multi
def cleanup(self):
| now = datetime.now()
# Only jobs with days_to_keep set prune anything.
for rec in self.filtered('days_to_keep'):
with rec.cleanup_log():
# File names are lexicographically ordered timestamps, so a
# plain string comparison against the cutoff name works.
oldest = self.filename((now - timedelta(days=rec.days_to_keep)))
if (rec.method == 'local'):
for name in iglob(os.path.join(rec.folder, '*.dump.zip')):
if (os.path.basename(name) < oldest):
os.unlink(name)
elif (rec.method == 'sftp'):
with rec.sftp_connection() as remote:
for name in remote.listdir(rec.folder):
if (name.endswith('.dump.zip') and (os.path.basename(name) < oldest)):
remote.unlink(('%s/%s' % (rec.folder, name)))
|
@api.multi
@contextmanager
def cleanup_log(self):
    'Log a possible cleanup failure.'
    self.ensure_one()
    try:
        _logger.info(
            'Starting cleanup process after database backup: %s',
            self.name)
        yield
    except Exception:
        # Fix: the original gave no argument for the %s placeholder, so
        # a literal "%s" was logged; supply the record name.  Also catch
        # Exception instead of everything, so SystemExit/KeyboardInterrupt
        # still propagate during shutdown.
        _logger.exception(
            'Cleanup of old database backups failed: %s', self.name)
        escaped_tb = tools.html_escape(traceback.format_exc())
        self.message_post(
            ('<p>%s</p><pre>%s</pre>'
             % (_('Cleanup of old database backups failed.'), escaped_tb)),
            subtype=self.env.ref('auto_backup.failure'))
    else:
        _logger.info(
            'Cleanup of old database backups succeeded: %s', self.name)
|
@api.model
def filename(self, when):
    """Generate a file name for a backup.

    :param datetime.datetime when:
        Use this datetime instead of :meth:`datetime.datetime.now`.
    """
    # Timestamped names sort lexicographically in chronological order,
    # which cleanup() relies on.
    return when.strftime('%Y_%m_%d_%H_%M_%S') + '.dump.zip'
|
@api.multi
def sftp_connection(self):
    'Return a new SFTP connection with found parameters.'
    self.ensure_one()
    params = {
        'host': self.sftp_host,
        'username': self.sftp_user,
        'port': self.sftp_port,
    }
    # Fix: the original passed ``extra=params``, which only attaches
    # attributes to the log record and leaves %(username)s etc.
    # unexpanded in the message.  Passing the mapping as the single
    # logging argument performs the %-style interpolation.
    _logger.debug(
        'Trying to connect to sftp://%(username)s@%(host)s:%(port)d',
        params)
    if self.sftp_private_key:
        params['private_key'] = self.sftp_private_key
        if self.sftp_password:
            # With key auth, the password is the key's passphrase.
            params['private_key_pass'] = self.sftp_password
    else:
        params['password'] = self.sftp_password
    return pysftp.Connection(**params)
|
'It provides mocked core assets'
| @contextmanager
def mock_assets(self):
# Patches db/os/shutil in the backup model's namespace and yields
# the mocks keyed by name; os.path.join is pinned to a fixed path.
| self.path_join_val = '/this/is/a/path'
with mock.patch(('%s.db' % model)) as db:
with mock.patch(('%s.os' % model)) as os:
with mock.patch(('%s.shutil' % model)) as shutil:
os.path.join.return_value = self.path_join_val
(yield {'db': db, 'os': os, 'shutil': shutil})
|
'It patches filtered record and provides a mock'
| @contextmanager
def patch_filtered_sftp(self, record, mocks=None):
| if (mocks is None):
mocks = ['sftp_connection']
mocks = {m: mock.DEFAULT for m in mocks}
with mock.patch.object(record, 'filtered') as filtered:
with mock.patch.object(record, 'backup_log'):
with mock.patch.multiple(record, **mocks):
# First filtered() call (local jobs) returns nothing; the
# second (sftp jobs) returns the record under test.
filtered.side_effect = ([], [record])
(yield filtered)
|
'It should create proper SFTP URI'
| def test_compute_name_sftp(self):
# The computed name must be the sftp://user@host:port/folder form.
| rec_id = self.new_record()
self.assertEqual(('sftp://%(user)s@%(host)s:%(port)s%(folder)s' % {'user': self.vals['sftp_user'], 'host': self.vals['sftp_host'], 'port': self.vals['sftp_port'], 'folder': self.vals['folder']}), rec_id.name)
|
'It should not allow recursive backups'
| def test_check_folder(self):
| rec_id = self.new_record('local')
# Pointing the folder inside the filestore must be rejected.
with self.assertRaises(exceptions.ValidationError):
rec_id.write({'folder': ('%s/another/path' % tools.config.filestore(self.env.cr.dbname))})
|
'It should raise connection succeeded warning'
| @mock.patch(('%s._' % model))
def test_action_sftp_test_connection_success(self, _):
# With the connection mocked to succeed, the action signals success
# via exceptions.Warning and translates the success message.
| rec_id = self.new_record()
with mock.patch.object(rec_id, 'sftp_connection'):
with self.assertRaises(exceptions.Warning):
rec_id.action_sftp_test_connection()
_.assert_called_once_with('Connection Test Succeeded!')
|
'It should raise connection fail warning'
| @mock.patch(('%s._' % model))
def test_action_sftp_test_connection_fail(self, _):
| rec_id = self.new_record()
with mock.patch.object(rec_id, 'sftp_connection') as conn:
# Entering the mocked connection raises, driving the failure path.
conn().__enter__.side_effect = TestConnectionException
with self.assertRaises(exceptions.Warning):
rec_id.action_sftp_test_connection()
_.assert_called_once_with('Connection Test Failed!')
|
'It should backup local database'
| def test_action_backup_local(self):
| rec_id = self.new_record('local')
filename = rec_id.filename(datetime.now())
rec_id.action_backup()
# Timestamped names sort chronologically, so ">= filename" keeps
# only files created by this run.
generated_backup = [f for f in os.listdir(rec_id.folder) if (f >= filename)]
self.assertEqual(1, len(generated_backup))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.