Python
def hooks_namespace(k, v):
    """Attach bare hooks declared in config."""
    # Use split again to allow multiple hooks for a single
    # hookpoint per path (e.g. "hooks.before_handler.1").
    # Little-known fact you only get from reading source ;)
    hookpoint = k.split('.', 1)[0]
    if isinstance(v, str):
        v = cherrypy.lib.reprconf.attributes(v)
    if not isinstance(v, Hook):
        v = Hook(v)
    cherrypy.serving.request.hooks[hookpoint].append(v)
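For context, a minimal sketch of how such a bare hook might be declared in application config. The handler class, hook function, and log message are illustrative, not taken from the source:

Python
import cherrypy


def audit():
    # Runs at the 'before_handler' hookpoint for requests under '/'.
    cherrypy.log('about to run the page handler')


class Root:
    @cherrypy.expose
    def index(self):
        return 'hello'


config = {
    '/': {
        # The key names the hookpoint; per the comment above, a suffix
        # such as 'hooks.before_handler.1' allows several hooks on the
        # same hookpoint. A string value would be resolved through
        # cherrypy.lib.reprconf.attributes() instead.
        'hooks.before_handler': audit,
    },
}

if __name__ == '__main__':
    cherrypy.quickstart(Root(), '/', config)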
Python
def request_namespace(k, v):
    """Attach request attributes declared in config."""
    # Provides config entries to set request.body attrs (like
    # attempt_charsets).
    if k[:5] == 'body.':
        setattr(cherrypy.serving.request.body, k[5:], v)
    else:
        setattr(cherrypy.serving.request, k, v)
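A sketch of config entries this namespace consumes; `attempt_charsets` is the `request.body` attribute named in the comment above, and `show_tracebacks` is a standard request attribute:

Python
config = {
    '/': {
        # Plain keys become attributes on cherrypy.serving.request...
        'request.show_tracebacks': False,
        # ...while 'body.'-prefixed keys land on request.body instead.
        'request.body.attempt_charsets': ['utf-8', 'latin-1'],
    },
}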
Python
def response_namespace(k, v):
    """Attach response attributes declared in config."""
    # Provides config entries to set default response headers
    # http://cherrypy.org/ticket/889
    if k[:8] == 'headers.':
        cherrypy.serving.response.headers[k.split('.', 1)[1]] = v
    else:
        setattr(cherrypy.serving.response, k, v)
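Correspondingly, a config sketch for the response namespace; the header name and timeout value here are illustrative:

Python
config = {
    '/': {
        # 'headers.'-prefixed keys become default response headers...
        'response.headers.X-Frame-Options': 'DENY',
        # ...anything else is set as an attribute on the response.
        'response.timeout': 600,
    },
}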
Python
def error_page_namespace(k, v):
    """Attach error pages declared in config."""
    if k != 'default':
        k = int(k)
    cherrypy.serving.request.error_page[k] = v
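A config sketch: numeric suffixes become int keys in request.error_page, while 'default' stays a string key and acts as the catch-all. The handler function and template path are placeholders:

Python
def error_page_404(status, message, traceback, version):
    # Signature CherryPy expects of a callable error page.
    return 'Sorry: %s - %s' % (status, message)


config = {
    '/': {
        'error_page.404': error_page_404,
        'error_page.default': '/path/to/error.html',
    },
}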
Python
def respond(self, path_info):
    """Generate a response for the resource at self.path_info. (Core)"""
    try:
        try:
            try:
                self._do_respond(path_info)
            except (cherrypy.HTTPRedirect, cherrypy.HTTPError):
                inst = sys.exc_info()[1]
                inst.set_response()
                self.stage = 'before_finalize (HTTPError)'
                self.hooks.run('before_finalize')
                cherrypy.serving.response.finalize()
        finally:
            self.stage = 'on_end_resource'
            self.hooks.run('on_end_resource')
    except self.throws:
        raise
    except Exception:
        if self.throw_errors:
            raise
        self.handle_error()
Python
def process_query_string(self):
    """Parse the query string into Python structures. (Core)"""
    try:
        p = httputil.parse_query_string(
            self.query_string, encoding=self.query_string_encoding)
    except UnicodeDecodeError:
        raise cherrypy.HTTPError(
            404,
            'The given query string could not be processed. Query '
            'strings for this resource must be encoded with %r.' %
            self.query_string_encoding)
    self.params.update(p)
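For the common case, `httputil.parse_query_string` can be approximated with the standard library; the analogue below is an assumption, not the exact implementation (CherryPy also folds repeated keys into lists):

Python
from urllib.parse import parse_qsl

params = dict(parse_qsl('city=Boston&q=caf%C3%A9', encoding='utf-8'))
print(params)  # {'city': 'Boston', 'q': 'café'}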
Python
def process_headers(self):
    """Parse HTTP header data into Python structures. (Core)"""
    # Process the headers into self.headers
    headers = self.headers
    for name, value in self.header_list:
        # Call title() now (and use dict.__method__(headers))
        # so title doesn't have to be called twice.
        name = name.title()
        value = value.strip()

        headers[name] = httputil.decode_TEXT_maybe(value)

        # Some clients, notably Konqueror, supply multiple
        # cookies on different lines with the same key. To
        # handle this case, store all cookies in self.cookie.
        if name == 'Cookie':
            try:
                self.cookie.load(value)
            except CookieError as exc:
                raise cherrypy.HTTPError(400, str(exc))

    if not dict.__contains__(headers, 'Host'):
        # All Internet-based HTTP/1.1 servers MUST respond with a 400
        # (Bad Request) status code to any HTTP/1.1 request message
        # which lacks a Host header field.
        if self.protocol >= (1, 1):
            msg = "HTTP/1.1 requires a 'Host' request header."
            raise cherrypy.HTTPError(400, msg)

    host = dict.get(headers, 'Host')
    if not host:
        host = self.local.name or self.local.ip
    self.base = '%s://%s' % (self.scheme, host)
Python
def handle_error(self):
    """Handle the last unanticipated exception. (Core)"""
    try:
        self.hooks.run('before_error_response')
        if self.error_response:
            self.error_response()
        self.hooks.run('after_error_response')
        cherrypy.serving.response.finalize()
    except cherrypy.HTTPRedirect:
        inst = sys.exc_info()[1]
        inst.set_response()
        cherrypy.serving.response.finalize()
Python
def collapse_body(self):
    """Collapse self.body to a single string; replace it and return it."""
    new_body = b''.join(self.body)
    self.body = new_body
    return new_body
Python
def _flush_body(self):
    """
    Discard self.body but consume any generator such that
    any finalization can occur, such as is required by
    caching.tee_output().
    """
    consume(iter(self.body))
Python
def uuid4(self):
    """Provide unique id on per-request basis using UUID4.

    It's evaluated lazily on render.
    """
    try:
        self._uuid4
    except AttributeError:
        # evaluate on first access
        self._uuid4 = uuid.uuid4()

    return self._uuid4
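This reads as a lazily-cached property. A self-contained sketch of the same idiom, with an illustrative class name:

Python
import uuid


class RequestToken:
    @property
    def uuid4(self):
        try:
            self._uuid4
        except AttributeError:
            # evaluate on first access, then reuse
            self._uuid4 = uuid.uuid4()
        return self._uuid4


token = RequestToken()
assert token.uuid4 is token.uuid4  # stable across repeated accesses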
Python
def rethrow(self):
    """Test that an error raised here will be thrown out to the server."""
    raise ValueError()
Python
def markLog(self, key=None):
    """Insert a marker line into the log and set self.lastmarker."""
    if key is None:
        key = str(time.time())
    self.lastmarker = key

    open(self.logfile, 'ab+').write(
        b'%s%s\n' % (self.markerPrefix, key.encode('utf-8'))
    )
Python
def _read_marked_region(self, marker=None):
    """Return lines from self.logfile in the marked region.

    If marker is None, self.lastmarker is used. If the log hasn't
    been marked (using self.markLog), the entire log will be returned.
    """
    # Give the logger time to finish writing?
    # time.sleep(0.5)

    logfile = self.logfile
    marker = marker or self.lastmarker
    if marker is None:
        return open(logfile, 'rb').readlines()

    if isinstance(marker, str):
        marker = marker.encode('utf-8')
    data = []
    in_region = False
    for line in open(logfile, 'rb'):
        if in_region:
            if line.startswith(self.markerPrefix) and marker not in line:
                break
            else:
                data.append(line)
        elif marker in line:
            in_region = True
    return data
Python
def assertInLog(self, line, marker=None):
    """Fail if the given (partial) line is not in the log.

    The log will be searched from the given marker to the next marker.
    If marker is None, self.lastmarker is used. If the log hasn't
    been marked (using self.markLog), the entire log will be searched.
    """
    data = self._read_marked_region(marker)
    for logline in data:
        if line in logline:
            return
    msg = '%r not found in log' % line
    self._handleLogError(msg, data, marker, line)
Python
def assertNotInLog(self, line, marker=None):
    """Fail if the given (partial) line is in the log.

    The log will be searched from the given marker to the next marker.
    If marker is None, self.lastmarker is used. If the log hasn't
    been marked (using self.markLog), the entire log will be searched.
    """
    data = self._read_marked_region(marker)
    for logline in data:
        if line in logline:
            msg = '%r found in log' % line
            self._handleLogError(msg, data, marker, line)
Python
def assertValidUUIDv4(self, marker=None):
    """Fail if the marked region does not contain a valid UUIDv4.

    The log will be searched from the given marker to the next marker.
    If marker is None, self.lastmarker is used. If the log hasn't
    been marked (using self.markLog), the entire log will be searched.
    """
    data = self._read_marked_region(marker)
    data = [
        chunk.decode('utf-8').rstrip('\n').rstrip('\r')
        for chunk in data
    ]
    for log_chunk in data:
        try:
            uuid_obj = UUID(log_chunk, version=4)
        except (TypeError, ValueError):
            pass  # it might be in another chunk
        else:
            if str(uuid_obj) == log_chunk:
                return
            msg = '%r is not a valid UUIDv4' % log_chunk
            self._handleLogError(msg, data, marker, log_chunk)
    msg = 'UUIDv4 not found in log'
    self._handleLogError(msg, data, marker, log_chunk)
Python
def assertLog(self, sliceargs, lines, marker=None):
    """Fail if log.readlines()[sliceargs] is not contained in 'lines'.

    The log will be searched from the given marker to the next marker.
    If marker is None, self.lastmarker is used. If the log hasn't
    been marked (using self.markLog), the entire log will be searched.
    """
    data = self._read_marked_region(marker)
    if isinstance(sliceargs, int):
        # Single arg. Use __getitem__ and allow lines to be str or list.
        if isinstance(lines, (tuple, list)):
            lines = lines[0]
        if isinstance(lines, str):
            lines = lines.encode('utf-8')
        if lines not in data[sliceargs]:
            msg = '%r not found on log line %r' % (lines, sliceargs)
            self._handleLogError(
                msg,
                [data[sliceargs], '--EXTRA CONTEXT--'] + data[
                    sliceargs + 1:sliceargs + 6],
                marker,
                lines)
    else:
        # Multiple args. Use __getslice__ and require lines to be list.
        if isinstance(lines, tuple):
            lines = list(lines)
        elif isinstance(lines, text_or_bytes):
            raise TypeError("The 'lines' arg must be a list when "
                            "'sliceargs' is a tuple.")

        start, stop = sliceargs
        for line, logline in zip(lines, data[start:stop]):
            if isinstance(line, str):
                line = line.encode('utf-8')
            if line not in logline:
                msg = '%r not found in log' % line
                self._handleLogError(msg, data[start:stop], marker, line)
Python
def load(self):
    """Copy stored session data into this session instance."""
    data = self._load()
    # data is either None or a tuple (session_data, expiration_time)
    if data is None or data[1] < self.now():
        if self.debug:
            cherrypy.log('Expired session %r, flushing data.' % self.id,
                         'TOOLS.SESSIONS')
        self._data = {}
    else:
        if self.debug:
            cherrypy.log('Data loaded for session %r.' % self.id,
                         'TOOLS.SESSIONS')
        self._data = data[0]
    self.loaded = True

    # Stick the clean_thread in the class, not the instance.
    # The instances are created and destroyed per-request.
    cls = self.__class__
    if self.clean_freq and not cls.clean_thread:
        # clean_up is an instancemethod and not a classmethod,
        # so that tool config can be accessed inside the method.
        t = cherrypy.process.plugins.Monitor(
            cherrypy.engine, self.clean_up, self.clean_freq * 60,
            name='Session cleanup')
        t.subscribe()
        cls.clean_thread = t
        t.start()
        if self.debug:
            cherrypy.log('Started cleanup thread.', 'TOOLS.SESSIONS')
Python
def pop(self, key, default=missing):
    """Remove the specified key and return the corresponding value.

    If key is not found, default is returned if given,
    otherwise KeyError is raised.
    """
    if not self.loaded:
        self.load()
    if default is missing:
        return self._data.pop(key)
    else:
        return self._data.pop(key, default)
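The `missing` default is a module-level sentinel, presumably defined as a bare `object()`. It lets the method distinguish "no default supplied" from an explicit `default=None`; a standalone sketch:

Python
missing = object()  # assumed definition of the sentinel


class Bag:
    def __init__(self):
        self._data = {'user': 'alice'}

    def pop(self, key, default=missing):
        if default is missing:
            return self._data.pop(key)        # may raise KeyError
        return self._data.pop(key, default)   # never raises


bag = Bag()
print(bag.pop('user'))        # 'alice'
print(bag.pop('user', None))  # None rather than KeyError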
Python
def acquire_lock(self, path=None):
    """Acquire an exclusive lock on the currently-loaded session data."""
    if path is None:
        path = self._get_file_path()
    path += self.LOCK_SUFFIX
    checker = locking.LockChecker(self.id, self.lock_timeout)
    while not checker.expired():
        try:
            self.lock = zc.lockfile.LockFile(path)
        except zc.lockfile.LockError:
            time.sleep(0.1)
        else:
            break
    self.locked = True
    if self.debug:
        cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
Python
def acquire_lock(self):
    """Acquire an exclusive lock on the currently-loaded session data."""
    self.locked = True
    self.locks.setdefault(self.id, threading.RLock()).acquire()
    if self.debug:
        cherrypy.log('Lock acquired.', 'TOOLS.SESSIONS')
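The `setdefault` call is what makes concurrent requests for the same session id share one lock: in CPython it returns the existing RLock if another thread won the race to create it. A minimal standalone sketch of the pattern:

Python
import threading

locks = {}  # session id -> RLock, shared across requests


def with_session_lock(session_id):
    lock = locks.setdefault(session_id, threading.RLock())
    lock.acquire()
    try:
        pass  # read or mutate the session data here
    finally:
        lock.release()


with_session_lock('abc123')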
Python
def save():
    """Save any changed session data."""
    if not hasattr(cherrypy.serving, 'session'):
        return
    request = cherrypy.serving.request
    response = cherrypy.serving.response

    # Guard against running twice
    if hasattr(request, '_sessionsaved'):
        return
    request._sessionsaved = True

    if response.stream:
        # If the body is being streamed, we have to save the data
        # *after* the response has been written out
        request.hooks.attach('on_end_request', cherrypy.session.save)
    else:
        # If the body is not being streamed, we save the data now
        # (so we can release the lock).
        if is_iterator(response.body):
            response.collapse_body()
        cherrypy.session.save()
Python
def close():
    """Close the session object for this request."""
    sess = getattr(cherrypy.serving, 'session', None)
    if getattr(sess, 'locked', False):
        # If the session is still locked we release the lock
        sess.release_lock()
        if sess.debug:
            cherrypy.log('Lock released on close.', 'TOOLS.SESSIONS')
Python
def expire():
    """Expire the current session cookie."""
    name = cherrypy.serving.request.config.get(
        'tools.sessions.name', 'session_id')
    one_year = 60 * 60 * 24 * 365
    e = time.time() - one_year
    cherrypy.serving.response.cookie[name]['expires'] = httputil.HTTPDate(e)
    cherrypy.serving.response.cookie[name].pop('max-age', None)
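Setting `expires` one year in the past is the conventional way to make a browser discard a cookie. `httputil.HTTPDate` renders an HTTP-date; a rough standard-library equivalent (an approximation, not the same helper):

Python
import time
from email.utils import formatdate

one_year = 60 * 60 * 24 * 365
# RFC 1123 date one year ago, e.g. 'Mon, 06 May 2024 12:00:00 GMT'
print(formatdate(time.time() - one_year, usegmt=True))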
Python
def load_module(name):
    """Import or reload tutorial module as needed."""
    target = 'cherrypy.tutorial.' + name
    if target in sys.modules:
        module = importlib.reload(sys.modules[target])
    else:
        module = importlib.import_module(target)
    return module
Python
def assertErrorPage(self, status, message=None, pattern=''):
    """Compare the response body with a built-in error page.

    The function will optionally look for the regexp pattern
    within the exception embedded in the error page.
    """
    # This will never contain a traceback
    page = cherrypy._cperror.get_error_page(status, message=message)

    # First, test the response body without checking the traceback.
    # Stick a match-all group (.*) in to grab the traceback.
    def esc(text):
        return re.escape(ntob(text))
    epage = re.escape(page)
    epage = epage.replace(
        esc('<pre id="traceback"></pre>'),
        esc('<pre id="traceback">') + b'(.*)' + esc('</pre>'))
    m = re.match(epage, self.body, re.DOTALL)
    if not m:
        self._handlewebError(
            'Error page does not match; expected:\n' + page)
        return

    # Now test the pattern against the traceback
    if pattern is None:
        # Special-case None to mean that there should be *no* traceback.
        if m and m.group(1):
            self._handlewebError('Error page contains traceback')
    else:
        if (m is None) or (
                not re.search(ntob(re.escape(pattern), self.encoding),
                              m.group(1))):
            msg = 'Error page does not contain %s in traceback'
            self._handlewebError(msg % repr(pattern))
Python
def start(self, imports=None):
    """Start cherryd in a subprocess."""
    portend.free(self.host, self.port, timeout=1)

    args = [
        '-m',
        'cherrypy',
        '-c', self.config_file,
        '-p', self.pid_file,
    ]
    r"""
    Command for running cherryd server with autoreload enabled

    Using

    ```
    ['-c',
     "__requires__ = 'CherryPy'; \
     import pkg_resources, re, sys; \
     sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]); \
     sys.exit(\
        pkg_resources.load_entry_point(\
            'CherryPy', 'console_scripts', 'cherryd')())"]
    ```

    doesn't work as it's impossible to reconstruct the `-c`'s contents.
    Ref: https://github.com/cherrypy/cherrypy/issues/1545
    """

    if not isinstance(imports, (list, tuple)):
        imports = [imports]
    for i in imports:
        if i:
            args.append('-i')
            args.append(i)

    if self.daemonize:
        args.append('-d')

    env = os.environ.copy()
    # Make sure we import the cherrypy package in which this module is
    # defined.
    grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
    if env.get('PYTHONPATH', ''):
        env['PYTHONPATH'] = os.pathsep.join(
            (grandparentdir, env['PYTHONPATH']))
    else:
        env['PYTHONPATH'] = grandparentdir
    self._proc = subprocess.Popen([sys.executable] + args, env=env)
    if self.wait:
        self.exit_code = self._proc.wait()
    else:
        portend.occupied(self.host, self.port, timeout=5)

    # Give the engine a wee bit more time to finish STARTING
    if self.daemonize:
        time.sleep(2)
    else:
        time.sleep(1)
Python
def synthesize_nonce(s, key, timestamp=None):
    """Synthesize a nonce value which resists spoofing and can be checked
    for staleness. Returns a string suitable as the value for 'nonce' in
    the www-authenticate header.

    s
        A string related to the resource, such as the hostname of the
        server.

    key
        A secret string known only to the server.

    timestamp
        An integer seconds-since-the-epoch timestamp
    """
    if timestamp is None:
        timestamp = int(time.time())
    h = md5_hex('%s:%s:%s' % (timestamp, s, key))
    nonce = '%s:%s' % (timestamp, h)
    return nonce
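A self-contained sketch of the same construction, assuming `md5_hex` is a thin wrapper over hashlib (the hostname and secret are illustrative):

Python
import hashlib
import time


def md5_hex(s):
    # Assumed equivalent of the md5_hex helper used above.
    return hashlib.md5(s.encode('utf-8')).hexdigest()


def synthesize_nonce(s, key, timestamp=None):
    if timestamp is None:
        timestamp = int(time.time())
    return '%s:%s' % (timestamp, md5_hex('%s:%s:%s' % (timestamp, s, key)))


print(synthesize_nonce('www.example.com', 'server-secret'))
# e.g. '1714900000:0a1b2c...' - the timestamp stays readable, so
# staleness can be checked without recomputing the hash.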
Python
def validate_nonce(self, s, key):
    """Validate the nonce.

    Returns True if nonce was generated by synthesize_nonce() and the
    timestamp is not spoofed, else returns False.

    s
        A string related to the resource, such as the hostname of
        the server.

    key
        A secret string known only to the server.

    Both s and key must be the same values which were used to
    synthesize the nonce we are trying to validate.
    """
    try:
        timestamp, hashpart = self.nonce.split(':', 1)
        s_timestamp, s_hashpart = synthesize_nonce(
            s, key, timestamp).split(':', 1)
        is_valid = s_hashpart == hashpart
        if self.debug:
            TRACE('validate_nonce: %s' % is_valid)
        return is_valid
    except ValueError:  # split() error
        pass
    return False
Python
def is_nonce_stale(self, max_age_seconds=600):
    """Returns True if a validated nonce is stale.

    The nonce contains a timestamp in plaintext and also a secure
    hash of the timestamp. You should first validate the nonce to
    ensure the plaintext timestamp is not spoofed.
    """
    try:
        timestamp, hashpart = self.nonce.split(':', 1)
        if int(timestamp) + max_age_seconds > int(time.time()):
            return False
    except ValueError:  # int() error
        pass
    if self.debug:
        TRACE('nonce is stale')
    return True
Python
def www_authenticate(
        realm, key, algorithm='MD5', nonce=None, qop=qop_auth,
        stale=False, accept_charset=DEFAULT_CHARSET[:],
):
    """Constructs a WWW-Authenticate header for Digest authentication."""
    if qop not in valid_qops:
        raise ValueError("Unsupported value for qop: '%s'" % qop)
    if algorithm not in valid_algorithms:
        raise ValueError("Unsupported value for algorithm: '%s'" % algorithm)

    HEADER_PATTERN = (
        'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"%s%s'
    )

    if nonce is None:
        nonce = synthesize_nonce(realm, key)

    stale_param = ', stale="true"' if stale else ''

    charset_declaration = _get_charset_declaration(accept_charset)

    return HEADER_PATTERN % (
        realm, nonce, algorithm, qop, stale_param, charset_declaration,
    )
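Filled in, the header pattern produces something like the following; all values here are illustrative:

Python
HEADER_PATTERN = (
    'Digest realm="%s", nonce="%s", algorithm="%s", qop="%s"%s%s'
)
print(HEADER_PATTERN % (
    'example.com',          # realm
    '1714900000:0a1b2c',    # nonce (timestamp:hash)
    'MD5',                  # algorithm
    'auth',                 # qop
    ', stale="true"',       # stale_param; empty string when not stale
    ', charset="UTF-8"',    # charset declaration; may also be empty
))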
Python
def digest_auth(realm, get_ha1, key, debug=False, accept_charset='utf-8'):
    """A CherryPy tool that hooks at before_handler to perform
    HTTP Digest Access Authentication, as specified in :rfc:`2617`.

    If the request has an 'authorization' header with a 'Digest' scheme,
    this tool authenticates the credentials supplied in that header.
    If the request has no 'authorization' header, or if it does but the
    scheme is not "Digest", or if authentication fails, the tool sends
    a 401 response with a 'WWW-Authenticate' Digest header.

    realm
        A string containing the authentication realm.

    get_ha1
        A callable that looks up a username in a credentials store
        and returns the HA1 string, which is defined in the RFC to be
        MD5(username : realm : password). The function's signature is:
        ``get_ha1(realm, username)``
        where username is obtained from the request's 'authorization'
        header. If username is not found in the credentials store,
        get_ha1() returns None.

    key
        A secret string known only to the server, used in the synthesis
        of nonces.
    """
    request = cherrypy.serving.request

    auth_header = request.headers.get('authorization')

    respond_401 = functools.partial(
        _respond_401, realm, key, accept_charset, debug)

    if not HttpDigestAuthorization.matches(auth_header or ''):
        respond_401()

    msg = 'The Authorization header could not be parsed.'
    with cherrypy.HTTPError.handle(ValueError, 400, msg):
        auth = HttpDigestAuthorization(
            auth_header, request.method,
            debug=debug, accept_charset=accept_charset,
        )

    if debug:
        TRACE(str(auth))

    if not auth.validate_nonce(realm, key):
        respond_401()

    ha1 = get_ha1(realm, auth.username)

    if ha1 is None:
        respond_401()

    # note that for request.body to be available we need to
    # hook in at before_handler, not on_start_resource like
    # 3.1.x digest_auth does.
    digest = auth.request_digest(ha1, entity_body=request.body)
    if digest != auth.response:
        respond_401()

    # authenticated
    if debug:
        TRACE('digest matches auth.response')
    # Now check if nonce is stale.
    # The choice of ten minutes' lifetime for nonce is somewhat
    # arbitrary
    if auth.is_nonce_stale(max_age_seconds=600):
        respond_401(stale=True)

    request.login = auth.username
    if debug:
        TRACE('authentication of %s successful' % auth.username)
Python
def _respond_401(realm, key, accept_charset, debug, **kwargs):
    """Respond with 401 status and a WWW-Authenticate header."""
    header = www_authenticate(
        realm, key,
        accept_charset=accept_charset,
        **kwargs
    )
    if debug:
        TRACE(header)
    cherrypy.serving.response.headers['WWW-Authenticate'] = header
    raise cherrypy.HTTPError(
        401, 'You are not authorized to access that resource')
Python
def default_status(cls):
    """The default redirect status for the request.

    RFC 2616 indicates a 301 response code fits our goal; however,
    browser support for 301 is quite messy. Use 302/303 instead. See
    http://www.alanflavell.org.uk/www/post-redirect.html
    """
    return 303 if cherrypy.serving.request.protocol >= (1, 1) else 302
Python
def clean_headers(status):
    """Remove any headers which should not apply to an error response."""
    response = cherrypy.serving.response

    # Remove headers which applied to the original content,
    # but do not apply to the error page.
    respheaders = response.headers
    for key in ['Accept-Ranges', 'Age', 'ETag', 'Location', 'Retry-After',
                'Vary', 'Content-Encoding', 'Content-Length', 'Expires',
                'Content-Location', 'Content-MD5', 'Last-Modified']:
        if key in respheaders:
            del respheaders[key]

    if status != 416:
        # A server sending a response with status code 416 (Requested
        # range not satisfiable) SHOULD include a Content-Range field
        # with a byte-range-resp-spec of "*". The instance-length
        # specifies the current length of the selected resource.
        # A response with status code 206 (Partial Content) MUST NOT
        # include a Content-Range field with a byte-range-resp-spec
        # of "*".
        if 'Content-Range' in respheaders:
            del respheaders['Content-Range']
Python
def handle(cls, exception, status=500, message=''):
    """Translate exception into an HTTPError."""
    try:
        yield
    except exception as exc:
        raise cls(status, message or str(exc))
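The bare `yield` marks this as a context manager; in the full class it is presumably decorated with `@classmethod` and `@contextlib.contextmanager`. A self-contained sketch of the pattern:

Python
import contextlib


class HTTPError(Exception):
    def __init__(self, status, message=''):
        super().__init__(status, message)
        self.status, self.message = status, message

    @classmethod
    @contextlib.contextmanager
    def handle(cls, exception, status=500, message=''):
        try:
            yield
        except exception as exc:
            raise cls(status, message or str(exc))


try:
    with HTTPError.handle(ValueError, 400, 'not an integer'):
        int('oops')
except HTTPError as e:
    print(e.status, e.message)  # 400 not an integer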
Python
def bare_error(extrabody=None):
    """Produce status, headers, body for a critical error.

    Returns a triple without calling any other questionable functions,
    so it should be as error-free as possible. Call it from an HTTP
    server if you get errors outside of the request.

    If extrabody is None, a friendly but rather unhelpful error message
    is set in the body. If extrabody is a string, it will be appended
    as-is to the body.
    """
    # The whole point of this function is to be a last line-of-defense
    # in handling errors. That is, it must not raise any errors itself;
    # it cannot be allowed to fail. Therefore, don't add to it!
    # In particular, don't call any other CP functions.

    body = b'Unrecoverable error in the server.'
    if extrabody is not None:
        if not isinstance(extrabody, bytes):
            extrabody = extrabody.encode('utf-8')
        body += b'\n' + extrabody

    return (b'500 Internal Server Error',
            [(b'Content-Type', b'text/plain'),
             (b'Content-Length', ntob(str(len(body)), 'ISO-8859-1'))],
            [body])
Python
def as_dict(self, raw=False, vars=None):
    """Convert an INI file to a dictionary."""
    # Load INI file into a dict
    result = {}
    for section in self.sections():
        if section not in result:
            result[section] = {}
        for option in self.options(section):
            value = self.get(section, option, raw=raw, vars=vars)
            try:
                value = unrepr(value)
            except Exception:
                x = sys.exc_info()[1]
                msg = ('Config error in section: %r, option: %r, '
                       'value: %r. Config values must be valid Python.' %
                       (section, option, value))
                raise ValueError(msg, x.__class__.__name__, x.args)
            result[section][option] = value
    return result
Python
def astnode(self, s):
    """Return a Python3 ast Node compiled from a string."""
    try:
        import ast
    except ImportError:
        # Fallback to eval when ast package is not available,
        # e.g. IronPython 1.0.
        return eval(s)

    p = ast.parse('__tempvalue__ = ' + s)
    return p.body[0].value
Python
def unrepr(s):
    """Return a Python object compiled from a string."""
    if not s:
        return s
    b = _Builder()
    obj = b.astnode(s)
    return b.build(obj)
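Usage sketch, assuming the function is importable from `cherrypy.lib.reprconf` (the module the `hooks_namespace` snippet above also references):

Python
from cherrypy.lib.reprconf import unrepr

print(unrepr("{'a': [1, 2, 3]}"))  # {'a': [1, 2, 3]}
print(unrepr('None'))              # None
print(unrepr(''))                  # '' - falsy input is returned as-is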
Python
def attributes(full_attribute_name):
    """Load a module and retrieve an attribute of that module."""
    # Parse out the path, module, and attribute
    last_dot = full_attribute_name.rfind('.')
    attr_name = full_attribute_name[last_dot + 1:]
    mod_path = full_attribute_name[:last_dot]

    mod = modules(mod_path)
    # Let an AttributeError propagate outward.
    try:
        attr = getattr(mod, attr_name)
    except AttributeError:
        raise AttributeError("'%s' object has no attribute '%s'"
                             % (mod_path, attr_name))

    # Return a reference to the attribute.
    return attr
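Usage sketch, again assuming `cherrypy.lib.reprconf` as the home module:

Python
from cherrypy.lib.reprconf import attributes

# Imports os.path and returns a reference to its join function.
join = attributes('os.path.join')
print(join('var', 'log'))  # 'var/log' on POSIX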
Python
def kwargs(self):
    """Page handler kwargs (with cherrypy.request.params copied in)."""
    kwargs = cherrypy.serving.request.params.copy()
    if self._kwargs:
        kwargs.update(self._kwargs)
    return kwargs
Python
def find_handler(self, path):
    """Return the appropriate page handler, plus any virtual path.

    This will return two objects. The first will be a callable,
    which can be used to generate page output. Any parameters from
    the query string or request body will be sent to that callable
    as keyword arguments.

    The callable is found by traversing the application's tree,
    starting from cherrypy.request.app.root, and matching path
    components to successive objects in the tree. For example, the
    URL "/path/to/handler" might return root.path.to.handler.

    The second object returned will be a list of names which are
    'virtual path' components: parts of the URL which are dynamic,
    and were not used when looking up the handler.
    These virtual path components are passed to the handler as
    positional arguments.
    """
    request = cherrypy.serving.request
    app = request.app
    root = app.root
    dispatch_name = self.dispatch_method_name

    # Get config for the root object/path.
    fullpath = [x for x in path.strip('/').split('/') if x] + ['index']
    fullpath_len = len(fullpath)
    segleft = fullpath_len
    nodeconf = {}
    if hasattr(root, '_cp_config'):
        nodeconf.update(root._cp_config)
    if '/' in app.config:
        nodeconf.update(app.config['/'])
    object_trail = [['root', root, nodeconf, segleft]]

    node = root
    iternames = fullpath[:]
    while iternames:
        name = iternames[0]
        # map to legal Python identifiers (e.g. replace '.' with '_')
        objname = name.translate(self.translate)

        nodeconf = {}
        subnode = getattr(node, objname, None)
        pre_len = len(iternames)
        if subnode is None:
            dispatch = getattr(node, dispatch_name, None)
            if dispatch and hasattr(dispatch, '__call__') and not \
                    getattr(dispatch, 'exposed', False) and \
                    pre_len > 1:
                # Don't expose the hidden 'index' token to _cp_dispatch
                # We skip this if pre_len == 1 since it makes no sense
                # to call a dispatcher when we have no tokens left.
                index_name = iternames.pop()
                subnode = dispatch(vpath=iternames)
                iternames.append(index_name)
            else:
                # We didn't find a path, but keep processing in case
                # there is a default() handler.
                iternames.pop(0)
        else:
            # We found the path, remove the vpath entry
            iternames.pop(0)
        segleft = len(iternames)
        if segleft > pre_len:
            # No path segment was removed. Raise an error.
            raise cherrypy.CherryPyException(
                'A vpath segment was added. Custom dispatchers may only '
                'remove elements. While trying to process '
                '{0} in {1}'.format(name, fullpath)
            )
        elif segleft == pre_len:
            # Assume that the handler used the current path segment, but
            # did not pop it. This allows things like
            # return getattr(self, vpath[0], None)
            iternames.pop(0)
            segleft -= 1
        node = subnode

        if node is not None:
            # Get _cp_config attached to this node.
            if hasattr(node, '_cp_config'):
                nodeconf.update(node._cp_config)

        # Mix in values from app.config for this path.
        existing_len = fullpath_len - pre_len
        if existing_len != 0:
            curpath = '/' + '/'.join(fullpath[0:existing_len])
        else:
            curpath = ''
        new_segs = fullpath[fullpath_len - pre_len:fullpath_len - segleft]
        for seg in new_segs:
            curpath += '/' + seg
            if curpath in app.config:
                nodeconf.update(app.config[curpath])

        object_trail.append([name, node, nodeconf, segleft])

    def set_conf():
        """Collapse all object_trail config into cherrypy.request.config."""
        base = cherrypy.config.copy()
        # Note that we merge the config from each node
        # even if that node was None.
        for name, obj, conf, segleft in object_trail:
            base.update(conf)
            if 'tools.staticdir.dir' in conf:
                base['tools.staticdir.section'] = '/' + \
                    '/'.join(fullpath[0:fullpath_len - segleft])
        return base

    # Try successive objects (reverse order)
    num_candidates = len(object_trail) - 1
    for i in range(num_candidates, -1, -1):

        name, candidate, nodeconf, segleft = object_trail[i]
        if candidate is None:
            continue

        # Try a "default" method on the current leaf.
        if hasattr(candidate, 'default'):
            defhandler = candidate.default
            if getattr(defhandler, 'exposed', False):
                # Insert any extra _cp_config from the default handler.
                conf = getattr(defhandler, '_cp_config', {})
                object_trail.insert(
                    i + 1, ['default', defhandler, conf, segleft])
                request.config = set_conf()
                # See https://github.com/cherrypy/cherrypy/issues/613
                request.is_index = path.endswith('/')
                return defhandler, fullpath[fullpath_len - segleft:-1]

        # Uncomment the next line to restrict positional params to
        # "default".
        # if i < num_candidates - 2: continue

        # Try the current leaf.
        if getattr(candidate, 'exposed', False):
            request.config = set_conf()
            if i == num_candidates:
                # We found the extra ".index". Mark request so tools
                # can redirect if path_info has no trailing slash.
                request.is_index = True
            else:
                # We're not at an 'index' handler. Mark request so tools
                # can redirect if path_info has NO trailing slash.
                # Note that this also includes handlers which take
                # positional parameters (virtual paths).
                request.is_index = False
            return candidate, fullpath[fullpath_len - segleft:-1]

    # We didn't find anything
    request.config = set_conf()
    return None, []
Python
def find_handler(self, path_info):
    """Find the right page handler, and set request.config."""
    import routes

    request = cherrypy.serving.request

    config = routes.request_config()
    config.mapper = self.mapper
    if hasattr(request, 'wsgi_environ'):
        config.environ = request.wsgi_environ
    config.host = request.headers.get('Host', None)
    config.protocol = request.scheme
    config.redirect = self.redirect

    result = self.mapper.match(path_info)

    config.mapper_dict = result
    params = {}
    if result:
        params = result.copy()
    if not self.full_result:
        params.pop('controller', None)
        params.pop('action', None)
    request.params.update(params)

    # Get config for the root object/path.
    request.config = base = cherrypy.config.copy()
    curpath = ''

    def merge(nodeconf):
        if 'tools.staticdir.dir' in nodeconf:
            nodeconf['tools.staticdir.section'] = curpath or '/'
        base.update(nodeconf)

    app = request.app
    root = app.root
    if hasattr(root, '_cp_config'):
        merge(root._cp_config)
    if '/' in app.config:
        merge(app.config['/'])

    # Mix in values from app.config.
    atoms = [x for x in path_info.split('/') if x]
    if atoms:
        last = atoms.pop()
    else:
        last = None
    for atom in atoms:
        curpath = '/'.join((curpath, atom))
        if curpath in app.config:
            merge(app.config[curpath])

    handler = None
    if result:
        controller = result.get('controller')
        controller = self.controllers.get(controller, controller)
        if controller:
            if isinstance(controller, classtype):
                controller = controller()
            # Get config from the controller.
            if hasattr(controller, '_cp_config'):
                merge(controller._cp_config)

        action = result.get('action')
        if action is not None:
            handler = getattr(controller, action, None)
            # Get config from the handler
            if hasattr(handler, '_cp_config'):
                merge(handler._cp_config)
        else:
            handler = controller

    # Do the last path atom here so it can
    # override the controller's _cp_config.
    if last:
        curpath = '/'.join((curpath, last))
        if curpath in app.config:
            merge(app.config[curpath])

    return handler
def VirtualHost(next_dispatcher=Dispatcher(), use_x_forwarded_host=True, **domains): """ Select a different handler based on the Host header. This can be useful when running multiple sites within one CP server. It allows several domains to point to different parts of a single website structure. For example:: http://www.domain.example -> root http://www.domain2.example -> root/domain2/ http://www.domain2.example:443 -> root/secure can be accomplished via the following config:: [/] request.dispatch = cherrypy.dispatch.VirtualHost( **{'www.domain2.example': '/domain2', 'www.domain2.example:443': '/secure', }) next_dispatcher The next dispatcher object in the dispatch chain. The VirtualHost dispatcher adds a prefix to the URL and calls another dispatcher. Defaults to cherrypy.dispatch.Dispatcher(). use_x_forwarded_host If True (the default), any "X-Forwarded-Host" request header will be used instead of the "Host" header. This is commonly added by HTTP servers (such as Apache) when proxying. ``**domains`` A dict of {host header value: virtual prefix} pairs. The incoming "Host" request header is looked up in this dict, and, if a match is found, the corresponding "virtual prefix" value will be prepended to the URL path before calling the next dispatcher. Note that you often need separate entries for "example.com" and "www.example.com". In addition, "Host" headers may contain the port number. """ from cherrypy.lib import httputil def vhost_dispatch(path_info): request = cherrypy.serving.request header = request.headers.get domain = header('Host', '') if use_x_forwarded_host: domain = header('X-Forwarded-Host', domain) prefix = domains.get(domain, '') if prefix: path_info = httputil.urljoin(prefix, path_info) result = next_dispatcher(path_info) # Touch up staticdir config. See # https://github.com/cherrypy/cherrypy/issues/614. section = request.config.get('tools.staticdir.section') if section: section = section[len(prefix):] request.config['tools.staticdir.section'] = section return result return vhost_dispatch
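A short mounting sketch (the application class is a placeholder): the dispatcher is installed through the '/' config section, exactly as the docstring above shows.

import cherrypy

class Root(object):
    @cherrypy.expose
    def index(self):
        return 'main site'

conf = {'/': {'request.dispatch': cherrypy.dispatch.VirtualHost(
    **{'www.domain2.example': '/domain2'})}}
cherrypy.quickstart(Root(), '/', conf)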
def refine(self, train_set, epochs=100, algo='EM', weight_decay=0.0,
           batch_size=1000):
    """
    Slightly optimized version of refine that does not load data
    separately for each tree.
    """
    optimizers = None
    # --- sequential loader
    sequential_loader = torch.utils.data.DataLoader(
        train_set, batch_size=batch_size, shuffle=False)  # num_workers=1
    # --- indexed random batch loader to assign hidden values to samples
    indexed_dataset = IndexedDataset(train_set)
    indexed_loader = torch.utils.data.DataLoader(
        indexed_dataset, batch_size=batch_size, shuffle=True)  # num_workers=1
    if len(self.new_trees) == 0:
        self.new_trees = self.trees
    # create unshuffled target tensor if necessary
    if isinstance(train_set, torch.utils.data.TensorDataset):
        target_tensor = train_set.tensors[1]
    else:
        target_tensor = []
        for (_, target) in sequential_loader:
            target_tensor.append(target)
        target_tensor = torch.cat(target_tensor, dim=0)
        del target
    if self.use_cuda:
        target_tensor = target_tensor.cuda()
    for epoch in range(1, epochs + 1):
        total_f_loss = 0.0
        if algo == 'alt':
            # NotImplemented is not an exception class; raise the error type
            raise NotImplementedError
        else:  # algo == 'EM'
            # --- E-step
            for t in self.new_trees:
                t.eval()
            for batch_idx, (data, target) in enumerate(sequential_loader):
                for t in self.new_trees:
                    # compute hidden per sample per leaves raw
                    with torch.no_grad():
                        accumulated_hidden_batch = \
                            t.root.EM_accumulate_hidden(data, target,
                                                        path_prob=1)
                        # normalize hidden
                        stack = (batch_idx == len(sequential_loader) - 1)
                        # true for last batch
                        t.root.EM_normalize_hidden(accumulated_hidden_batch,
                                                   stack)
            for t in self.new_trees:
                t.train()
            # --- M-step
            if optimizers is not None:
                for batch_idx, (sample_idx, data, _) in \
                        enumerate(indexed_loader):
                    for (t, o) in zip(self.new_trees, optimizers):
                        total_f_loss += t.EM_M_Step(sample_idx, data, o)
            # compute leaf posterior...
            for t in self.new_trees:
                with torch.no_grad():
                    t.root.EM_compute_posterior(target_tensor)
        if optimizers is None:
            # do not optimize decisions in 1st step
            optimizers = [optim.Adam(t.parameters(), lr=0.001,
                                     weight_decay=weight_decay)
                          for t in self.new_trees]
        if epoch == epochs:
            self.trees += self.new_trees
            self.new_trees = []
        yield total_f_loss
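A hedged usage sketch (the owning forest object and its data are placeholders): refine is a generator, so iterating it drives the training epochs.

# `forest` is assumed to be an instance of the class defining refine()
for epoch, f_loss in enumerate(forest.refine(train_set, epochs=20,
                                             algo='EM', batch_size=512), 1):
    print('epoch {}: functional loss {:.4f}'.format(epoch, f_loss))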
def fit_greedy(self, train_set, epochs, algo, steepness_inc, max_depth=4,
               n_max_nodes=0, weight_decay=0.0):
    """
    Function to invoke the greedy tree building procedure described in
    the paper.
    train_set: a pytorch dataset which can be used with the pytorch
               dataloader.
    epochs: number of epochs to train each decision.
    algo: choose between Expectation-Maximization (EM) or alternating
          approach (alt) described in supplementary.
    steepness_inc: increase of steepness hyperparameter per epoch.
    max_depth: limit the depth of the resulting decision tree. It can
               also be used to extend the depth of an existing decision
               tree.
    n_max_nodes: if set > 0, the tree will be built in best-first manner
                 and max_depth is ignored. At the moment, this will
                 always build a new tree from scratch.
    weight_decay: L2 regularization of parameters as described in the
                  pytorch documentation of the Adam optimizer.
    """
    train_set = MaskedDataset(train_set)
    if n_max_nodes > 0:  # max_depth is not valid here...
        # best first
        #print("Best first training!")
        path_end_nodes = self.root.fit_greedy(train_set, epochs, algo,
                                              steepness_inc, weight_decay)
        splitted = 0
        while len(path_end_nodes) < n_max_nodes:
            node_to_split = max(path_end_nodes,
                                key=lambda node: node.total_info_gain)
            #print("split node with info gain {}".\
            #      format(node_to_split.total_info_gain))
            node_to_split.split(self.initial_steepness)
            splitted += 1
            node_to_split.node_id = splitted
            path_end_nodes = self.root.fit_greedy(train_set, epochs, algo,
                                                  steepness_inc,
                                                  weight_decay)
        #print("gains")
        #self.root.foreach(lambda x: print(x.total_info_gain) if x._path_end else False)
    else:
        # depth first
        #print("Depth first training!")
        splitted = 0
        need_fit = True
        while need_fit:
            need_fit = False
            path_end_nodes = self.root.fit_greedy(train_set, epochs, algo,
                                                  steepness_inc,
                                                  weight_decay)
            for node in path_end_nodes:
                if node.depth < max_depth and node.splitable:
                    #print("split node at depth {}".format(node.depth))
                    node.split(self.initial_steepness)
                    if node.depth != max_depth - 1:
                        need_fit = True
                    splitted += 1
                    node.node_id = splitted
    #print("finished fit")
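Two hedged invocation sketches (the tree object and hyperparameter values are assumptions): depth-first growth bounded by max_depth, and best-first growth bounded by n_max_nodes.

# depth-first: grow until every leaf reaches depth 4
tree.fit_greedy(train_set, epochs=40, algo='EM', steepness_inc=0.1,
                max_depth=4)
# best-first: repeatedly split the highest-gain leaf until 15 leaves exist
tree.fit_greedy(train_set, epochs=40, algo='EM', steepness_inc=0.1,
                n_max_nodes=15)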
def refine(self, train_set, epochs=100, algo='EM', weight_decay=0.0): """ This function is responsible for the actual training of the splits. It is not only used for refinement, but also for greedy fitting. When doing a greedy fit a tree stump is refined. Parameters should be self-explanatory. """ #print(algo) optimizer = None # --- sequential loader sequential_loader = torch.utils.data.DataLoader( train_set, batch_size=self.batch_size, shuffle=False) #num_workers=1) # create unshuffled target tensor if necessary target_tensor = [] for (_, target) in sequential_loader: target_tensor.append(target) target_tensor = torch.cat(target_tensor, dim=0) del target, sequential_loader if self.use_cuda: target_tensor = target_tensor.cuda() #print("Epochs: {}".format(epochs)) for epoch in range(1, epochs+1): if algo == 'alt': optimizer, f_loss = self.alt_refine_step(train_set, optimizer) else: # algo == 'EM' optimizer, f_loss = self.EM_refine_step(train_set, optimizer, target_tensor) if optimizer is None: # do not optimize decisions in 1st step #print("wd = {}".format(weight_decay)) optimizer = optim.Adam(self.parameters(), lr=0.001, weight_decay=weight_decay) yield f_loss
def download_extension(extension_id, browser_version): """ Download the crx file for the extension with the given extension id :param extension_id: The id of the extension :param browser_version: The Chrome version :return: A tuple with the (filename, path) of the downloaded crx file """ crx_url = get_crx_url(extension_id, browser_version) print 'Downloading {crx_url}...'.format(crx_url=crx_url) status_code = urllib.urlopen(crx_url).getcode() if status_code == 200: # TODO: Replace the filename with the name of the Chrome extension filename = '{0}.crx'.format(extension_id) dst_path = os.path.join(get_script_path(), filename) try: urllib.urlretrieve(crx_url, dst_path) except Exception as e: print 'Couldn\'t download the crx file ({0})'.format(e) return None else: print 'Chrome extension crx file downloaded successfully' return (filename, dst_path) else: print 'Couldn\'t download the crx file (status code: {0})'.format(status_code) return None
def extract_archive(filename, path): """ Extract the contents of the given archive :param filename: The name of the crx file :param path: The path to the crx file :return: Whether the contents of the crx file were extracted successfully or not """ dst_dir = os.path.join(get_script_path(), filename[:-4]) if not os.path.exists(dst_dir): os.makedirs(dst_dir) print 'Directory {dir} created'.format( dir=filename[:-4]) try: print 'Extracting the contents of {0}...'.format(filename) zip_ref = zipfile.ZipFile(path, 'r') zip_ref.extractall(dst_dir) zip_ref.close() except Exception as e: print 'Couldn\'t extract the contents of the crx file ({0})'.format(e) return False else: print 'Extracted successfully' return True
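A short end-to-end sketch chaining the two helpers above (the extension id and version shown are placeholders, not verified values):

result = download_extension('abcdefghijklmnoabcdefhijklmnoabc', '96.0.4664.45')
if result:
    filename, dst_path = result
    extract_archive(filename, dst_path)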
def append(self, sub):
    """
    appends sub string to the existing string
    :param sub: sub-string to be appended
    :return: the builder itself (allowing call chaining) if the
             sub-string was appended, or False if appending would make
             the string exceed the maximum capacity
    """
    length = len(sub)
    if self.capacity is not None \
            and self.length + length > self.capacity:
        return False
    self.subs.append(str(sub))
    self.length += length
    return self
def delete(self, start=None, end=None):
    """
    deletes the sub-string starting at start and ending at end - 1
    if end is None: delete everything after the start index (the
                    character at start is kept)
    if start is None: delete the characters 0..end-1
    if neither is None: delete the subset between start and end
    if both are None: empty the string builder
    :param start: starting index
    :param end: ending index
    """
    if (end is not None) and (start is not None) and (end < start):
        raise RuntimeError("end must be bigger than or equal start")
    if start is not None:
        if end is not None:
            # start = #, end = #
            # delete items within the range
            self._delete_subset(start, end)
        else:
            # start = #, end = None
            # delete items after the start index
            self._delete_all_after(start)
    else:
        if end is not None:
            # start = None, end = #
            # delete items before the end index
            self._delete_all_before(end)
        else:
            # delete all
            self.subs = []
            self.length = 0
    return self
def _delete_all_before(self, index):
    """
    delete all the characters before the index
    :type index: int
    """
    if index > self.length - 1 or index < 0:
        raise RuntimeError("index must be (0 <= index <= length-1)")
    idx = 0
    for i in range(len(self.subs)):
        sub = self.subs[i]
        sub_length = len(sub)
        if idx < index:
            # compare against the running character offset idx, not the
            # chunk index i, when locating the cut point
            if index < idx + sub_length:
                # the cut falls inside this chunk: keep its tail
                self.subs[i] = sub[(index - idx):]
            else:
                # the whole chunk lies before the cut: drop it
                self.subs[i] = ""
        idx += sub_length
    self.length -= index
def _delete_all_after(self, index): """ delete all the characters after the index :type index: int """ if index > self.length - 1 or index < 0: raise RuntimeError("index must be (0 <= index <= length-1)") idx = 0 for i in range(len(self.subs)): sub = self.subs[i] sub_length = len(sub) if idx <= index < (idx + sub_length): self.subs[i] = sub[:(index - idx + 1)] elif idx > index: self.subs[i] = "" idx += len(sub) self.length -= (self.length - index - 1)
def _delete_subset(self, start, end):
    """
    delete substring starting from start and ending before end
    :param start: starting index
    :param end: stopping index
    """
    if end < start:
        raise RuntimeError("end must be bigger than or equal start")
    if start > self.length - 1 or start < 0:
        raise RuntimeError("start must be (0 <= index <= length-1)")
    if end > self.length - 1 or end < 0:
        raise RuntimeError("end must be (0 <= index <= length-1)")
    idx = 0
    started = False
    for i in range(len(self.subs)):
        sub = self.subs[i]
        sub_length = len(sub)
        if started:
            if idx <= end < (idx + sub_length):
                # the deleted range ends inside this chunk: keep its tail
                self.subs[i] = sub[(end - idx):]
            elif end < idx:
                break
            else:
                # chunk lies entirely inside the deleted range
                self.subs[i] = ""
        if idx <= start < (idx + sub_length):
            # compare end against the character offset idx (not the
            # chunk index i) when checking for a same-chunk deletion
            if idx <= end < (idx + sub_length):
                self.subs[i] = sub[:(start - idx)] + sub[(end - idx):]
            else:
                started = True
                self.subs[i] = sub[:(start - idx)]
        idx += sub_length
    # keep the cached length in sync with the deleted range
    self.length -= (end - start)
def replace(self, old, new, start=None, end=None):
    """
    replace all occurrences of the character old within the
    range(start, end)
    :param old: to be replaced character
    :param new: new character (or string)
    :param start: starting index
    :param end: stopping index
    """
    if (end is not None) and (start is not None):
        if end < start:
            raise RuntimeError("end must be bigger than or equal start")
        if start > self.length - 1 or start < 0:
            raise RuntimeError("start must be (0 <= index <= length-1)")
        if end > self.length - 1 or end < 0:
            raise RuntimeError("end must be (0 <= index <= length-1)")
    index = 0
    for i_subs in range(len(self.subs)):
        sub = self.subs[i_subs]
        rebuilt = []
        for i_sub in range(len(sub)):
            if (start is None or start <= index) \
                    and (end is None or end > index) \
                    and sub[i_sub] == old:
                rebuilt.append(new)
                self.length += len(new) - 1
            else:
                rebuilt.append(sub[i_sub])
            # advance the global character position (this was missing, so
            # the start/end bounds were always compared against 0)
            index += 1
        self.subs[i_subs] = ''.join(rebuilt)
    return self
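A hedged usage sketch of the builder methods above (the class name StringBuilder and its constructor signature are assumptions; this excerpt only shows the methods):

sb = StringBuilder(capacity=32)        # hypothetical constructor
sb.append("hello ").append("world")    # append returns self, so calls chain
sb.replace('o', '0')                   # contents become "hell0 w0rld"
sb.delete(start=4)                     # keep characters 0..4: "hell0"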
def cli(input, output=None, input_format=None, output_format=None, index_slot=None, schema=None, target_class=None) -> None: """ Converts to/from TSV to rich LinkML instance format (JSON/YAML/RDF) """ python_module = make_python(schema) target_class = python_module.__dict__[target_class] schema = YAMLGenerator(schema).schema input_format = _get_format(input, input_format) output_format = _get_format(output, output_format) loader = get_loader(input_format) dumper = get_dumper(output_format) if _is_xsv(input_format): obj = loader.load(source=input, target_class=target_class, schema=schema, index_slot=index_slot) else: obj = loader.load(source=input, target_class=target_class) if _is_xsv(output_format): obj = dumper.dump(obj, output, schema=schema, index_slot=index_slot) else: obj = dumper.dump(obj, output)
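A hedged programmatic call of the converter above (file names, target class, and index slot are placeholders; as shown, the function carries no decorator and can be invoked directly):

cli('persons.tsv', output='persons.yaml', schema='personinfo.yaml',
    target_class='Container', index_slot='persons')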
def log(message): ''' If we're in debug-mode we should show a lot more output ''' if os.environ.get('DEBUG'): print message
def fix_user(user_string): ''' Cleanup the user string in the status object to only contain username. ''' return user_string.replace('(av ', '').replace(')', '')
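For instance, stripping the Swedish 'av' ('by') prefix:

fix_user('(av jdoe)')   # 'jdoe'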
def fix_date(date_string):
    '''
    Convert the Sector Alarm way of stating dates to
    something sane (ISO compliant).
    '''
    result = ""
    try:
        epoch = re.search(r'\/Date\(([0-9]+?)\)\/', date_string).group(1)
        date = datetime.datetime.fromtimestamp(int(epoch)/1000)
        result = date.isoformat()
    except AttributeError:
        result = ""
    return result
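For example (the exact result depends on the local timezone, since fromtimestamp converts to local time):

fix_date('/Date(1388530800000)/')   # e.g. '2014-01-01T00:00:00'
fix_date('not a date')              # '' (the regex miss raises AttributeError)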
def __get_token(self):
    '''
    Do an initial request to get the CSRF-token from
    the login form.
    '''
    response = self.session.get(LOGINPAGE)
    parser = ParseHTMLToken()
    parser.feed(response.text)
    # check the list itself first: indexing an empty list would raise
    # IndexError before the intended exception could be raised
    if not parser.tokens or not parser.tokens[0]:
        raise Exception('Could not find CSRF-token.')
    return parser.tokens[0]
def __get_status(self): ''' Fetch and parse the actual alarm status page. ''' response = self.session.post(STATUSPAGE) return {'ArmedStatus': response.json().get('Panel', {}).get('ArmedStatus', None)}
def __get_log(self): ''' Fetch and parse the event log page. ''' response = self.session.get(LOGPAGE + config.siteid) event_log = [] for row in (response.json())['LogDetails']: row_data = row.copy() row_data['Time'] = fix_date(row_data.get('Time', None)) event_log.append(row_data) return event_log
def __save_cookies(self): ''' Store the cookie-jar on disk to avoid having to login each time the script is run. ''' with open(COOKIEFILE, 'w') as cookie_file: json.dump( requests.utils.dict_from_cookiejar(self.session.cookies), cookie_file ) log('Saved {0} cookie values'.format( len(requests.utils.dict_from_cookiejar( self.session.cookies).keys())))
def __load_cookies(self): ''' Load the cookies from the cookie-jar to avoid logging in again if the session still is valid. ''' try: with open(COOKIEFILE, 'r') as cookie_file: self.session.cookies = requests.utils.cookiejar_from_dict( json.load(cookie_file) ) except IOError, message: if str(message)[:35] != '[Errno 2] No such file or directory': raise message log('Loaded {0} cookie values'.format( len(requests.utils.dict_from_cookiejar( self.session.cookies).keys())))
def __is_logged_in(self): ''' Check if we're logged in. Returns bool ''' response = self.session.get(CHECKPAGE) loggedin = ('frmLogin' not in response.text) return loggedin
def __login(self): ''' Login to the site if we're not logged in already. First try any existing session from the stored cookie. If that fails we should login again. ''' self.__load_cookies() if not self.__is_logged_in(): log('Logging in') form_data = { 'userID': self.config.email, 'password': self.config.password } self.session = requests.Session() # Get CSRF-token and add it to the form data. form_data['__RequestVerificationToken'] = self.__get_token() # Do the actual logging in. self.session.post(LOGINPAGE + '?Returnurl=~%2F', data=form_data) # Save the cookies to file. self.__save_cookies() else: log('Already logged in')
def event_log(self):
    '''
    Retrieve the event log, login if necessary.
    '''
    self.__login()

    # Get event log
    return self.__get_log()
def status(self): ''' Wrapper function for logging in and fetching the status of the alarm in one go that returns a dict. ''' self.__login() # Get the status status = self.__get_status() return status
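A hedged end-to-end sketch (the containing class name SectorStatus and the config object are assumptions based on this excerpt):

alarm = SectorStatus(config)     # hypothetical constructor
print(alarm.status())            # e.g. {'ArmedStatus': 'armed'}
for event in alarm.event_log():
    print(event['Time'])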
def echo():
    '''
    Receive request. Fetch request body and return as is
    :return: the request body echoed back with the original content type
    '''
    if request.data:
        res = request.data.decode("UTF-8")
    else:
        return Response(response="You should send some data", status=400)

    return Response(response=res, status=200,
                    headers={"content-type": request.content_type})
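A quick exercise of the endpoint via Flask's test client (assuming this view is registered on a Flask application named app at the route /echo):

client = app.test_client()
resp = client.post('/echo', data='ping', content_type='text/plain')
print(resp.status_code, resp.get_data(as_text=True))  # 200 ping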
def L_layer_model(X, Y, layers_dims, learning_rate=0.0075,
                  num_iterations=3000, print_cost=False):  # lr was 0.009
    """
    Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.

    Arguments:
    X -- data, numpy array of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
    layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
    learning_rate -- learning rate of the gradient descent update rule
    num_iterations -- number of iterations of the optimization loop
    print_cost -- if True, it prints the cost every 100 steps

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """

    np.random.seed(1)
    costs = []  # keep track of cost

    # Parameters initialization.
    ### START CODE HERE ###
    parameters = initialize_parameters_deep(layers_dims)
    ### END CODE HERE ###

    # Loop (gradient descent)
    for i in range(0, num_iterations):

        # Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
        ### START CODE HERE ### (≈ 1 line of code)
        AL, caches = L_model_forward(X, parameters)
        ### END CODE HERE ###

        # Compute cost.
        ### START CODE HERE ### (≈ 1 line of code)
        cost = compute_cost(AL, Y)
        ### END CODE HERE ###

        # Backward propagation.
        ### START CODE HERE ### (≈ 1 line of code)
        grads = L_model_backward(AL, Y, caches)
        ### END CODE HERE ###

        # Update parameters.
        ### START CODE HERE ### (≈ 1 line of code)
        parameters = update_parameters(parameters, grads, learning_rate)
        ### END CODE HERE ###

        # Print and record the cost every 100 training iterations
        if print_cost and i % 100 == 0:
            print("Cost after iteration %i: %f" % (i, cost))
            costs.append(cost)

    # plot the cost
    plt.plot(np.squeeze(costs))
    plt.ylabel('cost')
    plt.xlabel('iterations (per hundreds)')
    plt.title("Learning rate =" + str(learning_rate))
    plt.show()

    return parameters
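A hedged invocation sketch (the layer sizes follow the usual 64x64x3 flattened-image setup; train_x and train_y are placeholders):

layers_dims = [12288, 20, 7, 5, 1]  # 4-layer model
parameters = L_layer_model(train_x, train_y, layers_dims,
                           num_iterations=2500, print_cost=True)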
def lstm_backward(da, caches): """ Implement the backward pass for the RNN with LSTM-cell (over a whole sequence). Arguments: da -- Gradients w.r.t the hidden states, numpy-array of shape (n_a, m, T_x) caches -- cache storing information from the forward pass (lstm_forward) Returns: gradients -- python dictionary containing: dx -- Gradient of inputs, of shape (n_x, m, T_x) da0 -- Gradient w.r.t. the previous hidden state, numpy array of shape (n_a, m) dWf -- Gradient w.r.t. the weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x) dWi -- Gradient w.r.t. the weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x) dWc -- Gradient w.r.t. the weight matrix of the memory gate, numpy array of shape (n_a, n_a + n_x) dWo -- Gradient w.r.t. the weight matrix of the save gate, numpy array of shape (n_a, n_a + n_x) dbf -- Gradient w.r.t. biases of the forget gate, of shape (n_a, 1) dbi -- Gradient w.r.t. biases of the update gate, of shape (n_a, 1) dbc -- Gradient w.r.t. biases of the memory gate, of shape (n_a, 1) dbo -- Gradient w.r.t. biases of the save gate, of shape (n_a, 1) """ # Retrieve values from the first cache (t=1) of caches. (caches, x) = caches (a1, c1, a0, c0, f1, i1, cc1, o1, x1, parameters) = caches[0] ### START CODE HERE ### # Retrieve dimensions from da's and x1's shapes (β‰ˆ2 lines) n_a, m, T_x = da.shape n_x, m = x1.shape # initialize the gradients with the right sizes (β‰ˆ12 lines) dx = np.zeros((n_x, m, T_x)) da0 = np.zeros((n_a, m)) da_prevt = np.zeros((n_a, m)) dc_prevt = np.zeros((n_a, m)) dWf = np.zeros((n_a, n_a + n_x)) dWi = np.zeros((n_a, n_a + n_x)) dWc = np.zeros((n_a, n_a + n_x)) dWo = np.zeros((n_a, n_a + n_x)) dbf = np.zeros((n_a, 1)) dbi = np.zeros((n_a, 1)) dbc = np.zeros((n_a, 1)) dbo = np.zeros((n_a, 1)) # loop back over the whole sequence for t in reversed(range(T_x)): # Compute all gradients using lstm_cell_backward gradients = lstm_cell_backward(da[:,:,t]+da_prevt, dc_prevt, caches[t]) # Store or add the gradient to the parameters' previous step's gradient da_prevt = gradients["da_prev"] dc_prevt = gradients["dc_prev"] dx[:,:,t] = gradients["dxt"] dWf += gradients["dWf"] dWi += gradients["dWi"] dWc += gradients["dWc"] dWo += gradients["dWo"] dbf += gradients["dbf"] dbi += gradients["dbi"] dbc += gradients["dbc"] dbo += gradients["dbo"] # Set the first activation's gradient to the backpropagated gradient da_prev. da0 = da_prevt ### END CODE HERE ### # Store the gradients in a python dictionary gradients = {"dx": dx, "da0": da0, "dWf": dWf,"dbf": dbf, "dWi": dWi,"dbi": dbi, "dWc": dWc,"dbc": dbc, "dWo": dWo,"dbo": dbo} return gradients
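A shape-level smoke test (assuming the companion lstm_forward from the same exercise and randomly initialized gate parameters):

np.random.seed(1)
n_x, n_a, n_y, m, T_x = 3, 5, 2, 10, 7
x = np.random.randn(n_x, m, T_x)
a0 = np.random.randn(n_a, m)
parameters = {
    'Wf': np.random.randn(n_a, n_a + n_x), 'bf': np.random.randn(n_a, 1),
    'Wi': np.random.randn(n_a, n_a + n_x), 'bi': np.random.randn(n_a, 1),
    'Wc': np.random.randn(n_a, n_a + n_x), 'bc': np.random.randn(n_a, 1),
    'Wo': np.random.randn(n_a, n_a + n_x), 'bo': np.random.randn(n_a, 1),
    'Wy': np.random.randn(n_y, n_a), 'by': np.random.randn(n_y, 1),
}
a, y, c, caches = lstm_forward(x, a0, parameters)  # assumed helper
da = np.random.randn(n_a, m, T_x)
gradients = lstm_backward(da, caches)
print(gradients['dx'].shape)   # (3, 10, 7)
print(gradients['dWf'].shape)  # (5, 8)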
def gradient_check(x, theta, epsilon=1e-7):
    """
    Implement gradient checking presented in Figure 1: compare the analytic
    gradient from backward_propagation with a centered finite-difference
    approximation.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well
    epsilon -- tiny shift to the input to compute approximated gradient with formula(1)

    Returns:
    difference -- difference (2) between the approximated gradient and the backward propagation gradient
    """

    # Compute gradapprox using left side of formula (1). epsilon is small enough, you don't need to worry about the limit.
    ### START CODE HERE ### (approx. 5 lines)
    thetaplus = theta + epsilon                       # Step 1
    thetaminus = theta - epsilon                      # Step 2
    J_plus = forward_propagation(x, thetaplus)        # Step 3
    J_minus = forward_propagation(x, thetaminus)      # Step 4
    gradapprox = (J_plus - J_minus) / (2. * epsilon)  # Step 5
    ### END CODE HERE ###

    # Check if gradapprox is close enough to the output of backward_propagation()
    ### START CODE HERE ### (approx. 1 line)
    grad = backward_propagation(x, theta)
    ### END CODE HERE ###

    ### START CODE HERE ### (approx. 1 line)
    numerator = np.linalg.norm(grad - gradapprox)                    # Step 1'
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)  # Step 2'
    difference = numerator / denominator                             # Step 3'
    ### END CODE HERE ###

    if difference < 1e-7:
        print("The gradient is correct!")
    else:
        print("The gradient is wrong!")

    return difference
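The standard smoke test for this checker (assuming the one-dimensional pair forward_propagation(x, theta) = theta * x and backward_propagation(x, theta) = x):

x, theta = 2, 4
difference = gradient_check(x, theta)
print(difference)  # expected to be far below 1e-7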
def forward_propagation_n(X, Y, parameters):
    """
    Implements the forward propagation (and computes the cost) presented in Figure 3.

    Arguments:
    X -- training set for m examples
    Y -- labels for m examples
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (5, 4)
                    b1 -- bias vector of shape (5, 1)
                    W2 -- weight matrix of shape (3, 5)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)

    Returns:
    cost -- the cost function (logistic cost averaged over the m examples)
    cache -- tuple of intermediate values needed by backward_propagation_n
    """

    # retrieve parameters
    m = X.shape[1]
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Cost
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1./m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)

    return cost, cache
def backward_propagation_n(X, Y, cache):
    """
    Implement the backward propagation presented in figure 2.

    Note: this version deliberately differs from the corrected
    backward_propagation_n further below; the stray factors in dW2 and db1
    are exactly the kind of errors gradient_check_n is meant to flag.

    Arguments:
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.
    """

    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims=True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1./m * np.dot(dZ2, A1.T) * 2   # erroneous extra factor of 2
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1./m * np.dot(dZ1, X.T)
    db1 = 4./m * np.sum(dZ1, axis=1, keepdims=True)  # erroneous 4./m (should be 1./m)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3,
                 "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
                 "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
def gradient_check_n(parameters, gradients, X, Y, epsilon=1e-7):
    """
    Checks whether backward_propagation_n correctly computes the gradient of the cost output by forward_propagation_n

    Arguments:
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
    grad -- output of backward_propagation_n, contains gradients of the cost with respect to the parameters.
    x -- input datapoint, of shape (input size, 1)
    y -- true "label"
    epsilon -- tiny shift to the input to compute approximated gradient with formula(1)

    Returns:
    difference -- difference (2) between the approximated gradient and the backward propagation gradient
    """

    # Set-up variables
    parameters_values, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = parameters_values.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Compute gradapprox
    for i in range(num_parameters):

        # Compute J_plus[i]. Inputs: "parameters_values, epsilon". Output = "J_plus[i]".
        # "_" is used because forward_propagation_n returns two values but we only care about the first one
        ### START CODE HERE ### (approx. 3 lines)
        thetaplus = np.copy(parameters_values)       # Step 1
        thetaplus[i][0] = thetaplus[i][0] + epsilon  # Step 2
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaplus))    # Step 3
        ### END CODE HERE ###

        # Compute J_minus[i]. Inputs: "parameters_values, epsilon". Output = "J_minus[i]".
        ### START CODE HERE ### (approx. 3 lines)
        thetaminus = np.copy(parameters_values)        # Step 1
        thetaminus[i][0] = thetaminus[i][0] - epsilon  # Step 2
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(thetaminus))  # Step 3
        ### END CODE HERE ###

        # Compute gradapprox[i]
        ### START CODE HERE ### (approx. 1 line)
        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2. * epsilon)
        ### END CODE HERE ###

    # Compare gradapprox to backward propagation gradients by computing difference.
    ### START CODE HERE ### (approx. 1 line)
    numerator = np.linalg.norm(grad - gradapprox)                    # Step 1'
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)  # Step 2'
    difference = numerator / denominator                             # Step 3'
    ### END CODE HERE ###

    if difference > 2e-7:
        print("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) + "\033[0m")
    else:
        print("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) + "\033[0m")

    return difference
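A hedged end-to-end run of the checker (X, Y, and parameters are placeholders with the shapes listed in forward_propagation_n's docstring). Run against the seeded-bug backward pass above, it should report a mistake; against the corrected version below, it should pass.

cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)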
def backward_propagation_n(X, Y, cache):
    """
    Implement the backward propagation presented in figure 2.

    Arguments:
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- A dictionary with the gradients of the cost with respect to each parameter, activation and pre-activation variables.
    """

    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims=True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = 1./m * np.dot(dZ2, A1.T)
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1./m * np.dot(dZ1, X.T)
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
                 "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
Python
def compute_cost(AL, Y):
    """
    Implement the cost function defined by equation (7).

    Arguments:
    AL -- probability vector corresponding to your label predictions, shape (1, number of examples)
    Y -- true "label" vector (for example: containing 0 if non-cat, 1 if cat), shape (1, number of examples)

    Returns:
    cost -- cross-entropy cost
    """

    m = Y.shape[1]

    # Compute loss from AL and Y.
    ### START CODE HERE ### (≈ 1 line of code)
    cost = (-1. / m) * np.sum(np.multiply(Y, np.log(AL)) + np.multiply(1 - Y, np.log(1 - AL)))
    ### END CODE HERE ###

    cost = np.squeeze(cost)  # To make sure your cost's shape is what we expect (e.g. this turns [[17]] into 17).
    assert(cost.shape == ())

    return cost
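A tiny worked example of the cross-entropy formula, assuming compute_cost above is in scope; the predictions and labels are hypothetical, and the value in the comment follows directly from the formula.

import numpy as np

AL = np.array([[0.8, 0.9, 0.4]])  # hypothetical predicted probabilities
Y = np.array([[1, 1, 0]])         # hypothetical labels
print(compute_cost(AL, Y))
# -(log(0.8) + log(0.9) + log(0.6)) / 3 ≈ 0.2798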
Python
def linear_backward(dZ, cache):
    """
    Implement the linear portion of backward propagation for a single layer (layer l)

    Arguments:
    dZ -- Gradient of the cost with respect to the linear output (of current layer l)
    cache -- tuple of values (A_prev, W, b) coming from the forward propagation in the current layer

    Returns:
    dA_prev -- Gradient of the cost with respect to the activation (of the previous layer l-1), same shape as A_prev
    dW -- Gradient of the cost with respect to W (current layer l), same shape as W
    db -- Gradient of the cost with respect to b (current layer l), same shape as b
    """
    A_prev, W, b = cache
    m = A_prev.shape[1]

    ### START CODE HERE ### (≈ 3 lines of code)
    dW = (1. / m) * np.dot(dZ, A_prev.T)
    db = (1. / m) * np.sum(dZ, axis=1, keepdims=True)
    dA_prev = np.dot(W.T, dZ)
    ### END CODE HERE ###

    assert (dA_prev.shape == A_prev.shape)
    assert (dW.shape == W.shape)
    assert (db.shape == b.shape)

    return dA_prev, dW, db
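Since linear_backward only needs shapes to be consistent, a quick sanity check with random arrays (the sizes below are hypothetical) confirms each gradient matches the shape of the quantity it differentiates:

import numpy as np

np.random.seed(0)
A_prev = np.random.randn(3, 2)  # 3 units in layer l-1, 2 examples
W = np.random.randn(4, 3)       # 4 units in layer l
b = np.random.randn(4, 1)
dZ = np.random.randn(4, 2)      # upstream gradient w.r.t. Z

dA_prev, dW, db = linear_backward(dZ, (A_prev, W, b))
print(dA_prev.shape, dW.shape, db.shape)  # (3, 2) (4, 3) (4, 1)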
Python
def sentence_to_avg(sentence, word_to_vec_map):
    """
    Converts a sentence (string) into a list of words (strings). Extracts the GloVe representation of each word
    and averages its value into a single vector encoding the meaning of the sentence.

    Arguments:
    sentence -- string, one training example from X
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation

    Returns:
    avg -- average vector encoding information about the sentence, numpy-array of shape (50,)
    """

    ### START CODE HERE ###
    # Step 1: Split sentence into list of lower case words (≈ 1 line)
    words = [word.lower() for word in sentence.split()]

    # Initialize the average word vector; it should have the same shape as your word vectors.
    avg = np.zeros((50,))

    # Step 2: average the word vectors by looping over the words in the list "words".
    total = 0
    for w in words:
        total += word_to_vec_map[w]
    avg = total / len(words)
    ### END CODE HERE ###

    return avg
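A minimal usage sketch with a toy embedding map; the random 50-dimensional vectors below are stand-ins for real GloVe vectors:

import numpy as np

np.random.seed(1)
word_to_vec_map = {w: np.random.randn(50) for w in ["i", "love", "deep", "learning"]}
avg = sentence_to_avg("I love deep learning", word_to_vec_map)
print(avg.shape)  # (50,)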
Python
def model(X, Y, word_to_vec_map, learning_rate=0.01, num_iterations=400):
    """
    Model to train word vector representations in numpy.

    Arguments:
    X -- input data, numpy array of sentences as strings, of shape (m, 1)
    Y -- labels, numpy array of integers between 0 and 4, numpy-array of shape (m, 1)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
    learning_rate -- learning rate for the stochastic gradient descent algorithm
    num_iterations -- number of iterations

    Returns:
    pred -- vector of predictions, numpy-array of shape (m, 1)
    W -- weight matrix of the softmax layer, of shape (n_y, n_h)
    b -- bias of the softmax layer, of shape (n_y,)
    """

    np.random.seed(1)

    # Define number of training examples
    m = Y.shape[0]  # number of training examples
    n_y = 5         # number of classes
    n_h = 50        # dimensions of the GloVe vectors

    # Initialize parameters using Xavier initialization
    W = np.random.randn(n_y, n_h) / np.sqrt(n_h)
    b = np.zeros((n_y,))

    # Convert Y to Y_onehot with n_y classes
    Y_oh = convert_to_one_hot(Y, C=n_y)

    # Optimization loop
    for t in range(num_iterations):  # Loop over the number of iterations
        for i in range(m):           # Loop over the training examples

            ### START CODE HERE ### (≈ 4 lines of code)
            # Average the word vectors of the words from the i'th training example
            avg = sentence_to_avg(X[i], word_to_vec_map)

            # Forward propagate the avg through the softmax layer
            z = np.dot(W, avg) + b
            a = softmax(z)

            # Compute cost using the i'th training label's one hot representation and "a" (the output of the softmax)
            cost = -np.dot(Y_oh[i], np.log(a))
            ### END CODE HERE ###

            # Compute gradients
            dz = a - Y_oh[i]
            dW = np.dot(dz.reshape(n_y, 1), avg.reshape(1, n_h))
            db = dz

            # Update parameters with Stochastic Gradient Descent
            W = W - learning_rate * dW
            b = b - learning_rate * db

        if t % 100 == 0:
            print("Epoch: " + str(t) + " --- cost = " + str(cost))
            pred = predict(X, Y, W, b, word_to_vec_map)  # predict is defined in emo_utils.py

    return pred, W, b
Python
def sentences_to_indices(X, word_to_index, max_len):
    """
    Converts an array of sentences (strings) into an array of indices corresponding to words in the sentences.
    The output shape should be such that it can be given to `Embedding()` (described in Figure 4).

    Arguments:
    X -- array of sentences (strings), of shape (m, 1)
    word_to_index -- a dictionary containing each word mapped to its index
    max_len -- maximum number of words in a sentence. You can assume every sentence in X is no longer than this.

    Returns:
    X_indices -- array of indices corresponding to words in the sentences from X, of shape (m, max_len)
    """

    m = X.shape[0]  # number of training examples

    ### START CODE HERE ###
    # Initialize X_indices as a numpy matrix of zeros and the correct shape (≈ 1 line)
    X_indices = np.zeros((m, max_len))

    for i in range(m):  # loop over training examples

        # Convert the ith training sentence to lower case and split it into words. You should get a list of words.
        sentence_words = [word.lower() for word in X[i].split()]

        # Initialize j to 0
        j = 0

        # Loop over the words of sentence_words
        for w in sentence_words:
            # Set the (i,j)th entry of X_indices to the index of the correct word.
            X_indices[i, j] = word_to_index[w]
            # Increment j to j + 1
            j += 1

    ### END CODE HERE ###

    return X_indices
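A toy run with a hypothetical five-word vocabulary; index 0 is implicitly reserved for padding because X_indices starts as zeros:

import numpy as np

word_to_index = {"i": 1, "like": 2, "pizza": 3, "and": 4, "pasta": 5}
X = np.array(["I like pizza", "pizza and pasta"])
print(sentences_to_indices(X, word_to_index, max_len=4))
# [[1. 2. 3. 0.]
#  [3. 4. 5. 0.]]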
Python
def pretrained_embedding_layer(word_to_vec_map, word_to_index): """ Creates a Keras Embedding() layer and loads in pre-trained GloVe 50-dimensional vectors. Arguments: word_to_vec_map -- dictionary mapping words to their GloVe vector representation. word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words) Returns: embedding_layer -- pretrained layer Keras instance """ vocab_len = len(word_to_index) + 1 # adding 1 to fit Keras embedding (requirement) emb_dim = word_to_vec_map["cucumber"].shape[0] # define dimensionality of your GloVe word vectors (= 50) ### START CODE HERE ### # Step 1 # Initialize the embedding matrix as a numpy array of zeros. # See instructions above to choose the correct shape. emb_matrix = np.zeros((vocab_len, emb_dim)) # Step 2 # Set each row "idx" of the embedding matrix to be # the word vector representation of the idx'th word of the vocabulary for word, idx in word_to_index.items(): emb_matrix[idx, :] = word_to_vec_map[word] # Step 3 # Define Keras embedding layer with the correct input and output sizes # Make it non-trainable. embedding_layer = Embedding(vocab_len, emb_dim, trainable=False) ### END CODE HERE ### # Step 4 (already done for you; please do not modify) # Build the embedding layer, it is required before setting the weights of the embedding layer. embedding_layer.build((None,)) # Do not modify the "None". This line of code is complete as-is. # Set the weights of the embedding layer to the embedding matrix. Your layer is now pretrained. embedding_layer.set_weights([emb_matrix]) return embedding_layer
Python
def Emojify_V2(input_shape, word_to_vec_map, word_to_index):
    """
    Function creating the Emojify-v2 model's graph.

    Arguments:
    input_shape -- shape of the input, usually (max_len,)
    word_to_vec_map -- dictionary mapping every word in a vocabulary into its 50-dimensional vector representation
    word_to_index -- dictionary mapping from words to their indices in the vocabulary (400,001 words)

    Returns:
    model -- a model instance in Keras
    """

    ### START CODE HERE ###
    # Define sentence_indices as the input of the graph.
    # It should be of shape input_shape and dtype 'int32' (as it contains indices, which are integers).
    sentence_indices = Input(input_shape, dtype='int32')

    # Create the embedding layer pretrained with GloVe Vectors (≈ 1 line)
    embedding_layer = pretrained_embedding_layer(word_to_vec_map, word_to_index)

    # Propagate sentence_indices through your embedding layer
    # (See additional hints in the instructions).
    embeddings = embedding_layer(sentence_indices)

    # Propagate the embeddings through an LSTM layer with 128-dimensional hidden state.
    # The returned output should be a batch of sequences.
    X = LSTM(128, return_sequences=True)(embeddings)
    # Add dropout with a probability of 0.5
    X = Dropout(0.5)(X)
    # Propagate X through another LSTM layer with 128-dimensional hidden state.
    # The returned output should be a single hidden state, not a batch of sequences.
    X = LSTM(128, return_sequences=False)(X)
    # Add dropout with a probability of 0.5
    X = Dropout(0.5)(X)
    # Propagate X through a Dense layer with 5 units
    X = Dense(5)(X)
    # Add a softmax activation
    X = Activation('softmax')(X)

    # Create Model instance which converts sentence_indices into X.
    model = Model(inputs=sentence_indices, outputs=X)
    ### END CODE HERE ###

    return model
Python
def forward_propagation(X, parameters):
    """
    Argument:
    X -- input data of size (n_x, m)
    parameters -- python dictionary containing your parameters (output of initialization function)

    Returns:
    A2 -- The sigmoid output of the second activation
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
    """
    # Retrieve each parameter from the dictionary "parameters"
    ### START CODE HERE ### (≈ 4 lines of code)
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    ### END CODE HERE ###

    # Implement Forward Propagation to calculate A2 (probabilities)
    ### START CODE HERE ### (≈ 4 lines of code)
    Z1 = np.dot(W1, X) + b1
    A1 = np.tanh(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = sigmoid(Z2)
    ### END CODE HERE ###

    assert(A2.shape == (1, X.shape[1]))

    cache = {"Z1": Z1,
             "A1": A1,
             "Z2": Z2,
             "A2": A2}

    return A2, cache
Python
def backward_propagation(parameters, cache, X, Y):
    """
    Implement the backward propagation using the instructions above.

    Arguments:
    parameters -- python dictionary containing our parameters
    cache -- a dictionary containing "Z1", "A1", "Z2" and "A2"
    X -- input data of shape (2, number of examples)
    Y -- "true" labels vector of shape (1, number of examples)

    Returns:
    grads -- python dictionary containing your gradients with respect to different parameters
    """
    m = X.shape[1]

    # First, retrieve W1 and W2 from the dictionary "parameters".
    ### START CODE HERE ### (≈ 2 lines of code)
    W1 = parameters['W1']
    W2 = parameters['W2']
    ### END CODE HERE ###

    # Retrieve also A1 and A2 from dictionary "cache".
    ### START CODE HERE ### (≈ 2 lines of code)
    A1 = cache['A1']
    A2 = cache['A2']
    ### END CODE HERE ###

    # Backward propagation: calculate dW1, db1, dW2, db2.
    ### START CODE HERE ### (≈ 6 lines of code, corresponding to 6 equations on slide above)
    dZ2 = A2 - Y
    dW2 = (1 / m) * np.dot(dZ2, A1.T)
    db2 = (1 / m) * np.sum(dZ2, axis=1, keepdims=True)
    dZ1 = np.multiply(np.dot(W2.T, dZ2), 1 - np.power(A1, 2))
    dW1 = (1 / m) * np.dot(dZ1, X.T)
    db1 = (1 / m) * np.sum(dZ1, axis=1, keepdims=True)
    ### END CODE HERE ###

    grads = {"dW1": dW1,
             "db1": db1,
             "dW2": dW2,
             "db2": db2}

    return grads
Python
def update_parameters(parameters, grads, learning_rate=1.2):
    """
    Updates parameters using the gradient descent update rule given above

    Arguments:
    parameters -- python dictionary containing your parameters
    grads -- python dictionary containing your gradients

    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    # Retrieve each parameter from the dictionary "parameters"
    ### START CODE HERE ### (≈ 4 lines of code)
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    ### END CODE HERE ###

    # Retrieve each gradient from the dictionary "grads"
    ### START CODE HERE ### (≈ 4 lines of code)
    dW1 = grads['dW1']
    db1 = grads['db1']
    dW2 = grads['dW2']
    db2 = grads['db2']
    ### END CODE HERE ###

    # Update rule for each parameter
    ### START CODE HERE ### (≈ 4 lines of code)
    W1 = W1 - learning_rate * dW1
    b1 = b1 - learning_rate * db1
    W2 = W2 - learning_rate * dW2
    b2 = b2 - learning_rate * db2
    ### END CODE HERE ###

    parameters = {"W1": W1,
                  "b1": b1,
                  "W2": W2,
                  "b2": b2}

    return parameters
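One hand-checkable gradient descent step through update_parameters, using 1x1 parameter matrices so the arithmetic is easy to follow:

import numpy as np

parameters = {"W1": np.array([[1.0]]), "b1": np.array([[0.0]]),
              "W2": np.array([[2.0]]), "b2": np.array([[0.0]])}
grads = {"dW1": np.array([[0.5]]), "db1": np.array([[0.1]]),
         "dW2": np.array([[0.2]]), "db2": np.array([[0.1]])}

parameters = update_parameters(parameters, grads, learning_rate=1.2)
print(parameters["W1"])  # [[0.4]], i.e. 1.0 - 1.2 * 0.5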
Python
def nn_model(X, Y, n_h, num_iterations=10000, print_cost=False):
    """
    Arguments:
    X -- dataset of shape (2, number of examples)
    Y -- labels of shape (1, number of examples)
    n_h -- size of the hidden layer
    num_iterations -- Number of iterations in gradient descent loop
    print_cost -- if True, print the cost every 1000 iterations

    Returns:
    parameters -- parameters learnt by the model. They can then be used to predict.
    """
    np.random.seed(3)
    n_x = layer_sizes(X, Y)[0]
    n_y = layer_sizes(X, Y)[2]

    # Initialize parameters, then retrieve W1, b1, W2, b2. Inputs: "n_x, n_h, n_y". Outputs = "W1, b1, W2, b2, parameters".
    ### START CODE HERE ### (≈ 5 lines of code)
    parameters = initialize_parameters(n_x, n_h, n_y)
    W1 = parameters['W1']
    b1 = parameters['b1']
    W2 = parameters['W2']
    b2 = parameters['b2']
    ### END CODE HERE ###

    # Loop (gradient descent)
    for i in range(0, num_iterations):
        ### START CODE HERE ### (≈ 4 lines of code)
        # Forward propagation. Inputs: "X, parameters". Outputs: "A2, cache".
        A2, cache = forward_propagation(X, parameters)
        # Cost function. Inputs: "A2, Y, parameters". Outputs: "cost".
        cost = compute_cost(A2, Y, parameters)
        # Backpropagation. Inputs: "parameters, cache, X, Y". Outputs: "grads".
        grads = backward_propagation(parameters, cache, X, Y)
        # Gradient descent parameter update. Inputs: "parameters, grads". Outputs: "parameters".
        parameters = update_parameters(parameters, grads)
        ### END CODE HERE ###

        # Print the cost every 1000 iterations
        if print_cost and i % 1000 == 0:
            print("Cost after iteration %i: %f" % (i, cost))

    return parameters
Python
def compute_cost_with_regularization(A3, Y, parameters, lambd):
    """
    Implement the cost function with L2 regularization. See formula (2) above.

    Arguments:
    A3 -- post-activation, output of forward propagation, of shape (output size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    parameters -- python dictionary containing parameters of the model
    lambd -- regularization hyperparameter, scalar

    Returns:
    cost -- value of the regularized loss function (formula (2))
    """
    m = Y.shape[1]
    W1 = parameters["W1"]
    W2 = parameters["W2"]
    W3 = parameters["W3"]

    cross_entropy_cost = compute_cost(A3, Y)  # This gives you the cross-entropy part of the cost

    ### START CODE HERE ### (approx. 1 line)
    L2_regularization_cost = (np.sum(np.square(W1)) + np.sum(np.square(W2)) + np.sum(np.square(W3))) * lambd / (2 * m)
    ### END CODE HERE ###

    cost = cross_entropy_cost + L2_regularization_cost

    return cost
Python
def backward_propagation_with_regularization(X, Y, cache, lambd):
    """
    Implements the backward propagation of our baseline model to which we added an L2 regularization.

    Arguments:
    X -- input dataset, of shape (input size, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation()
    lambd -- regularization hyperparameter, scalar

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
    """

    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    ### START CODE HERE ### (approx. 1 line)
    dW3 = 1./m * np.dot(dZ3, A2.T) + (lambd / m) * W3
    ### END CODE HERE ###
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims=True)

    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    ### START CODE HERE ### (approx. 1 line)
    dW2 = 1./m * np.dot(dZ2, A1.T) + (lambd / m) * W2
    ### END CODE HERE ###
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    ### START CODE HERE ### (approx. 1 line)
    dW1 = 1./m * np.dot(dZ1, X.T) + (lambd / m) * W1
    ### END CODE HERE ###
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
                 "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
Python
def forward_propagation_with_dropout(X, parameters, keep_prob=0.5):
    """
    Implements the forward propagation: LINEAR -> RELU + DROPOUT -> LINEAR -> RELU + DROPOUT -> LINEAR -> SIGMOID.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    parameters -- python dictionary containing your parameters "W1", "b1", "W2", "b2", "W3", "b3":
                    W1 -- weight matrix of shape (20, 2)
                    b1 -- bias vector of shape (20, 1)
                    W2 -- weight matrix of shape (3, 20)
                    b2 -- bias vector of shape (3, 1)
                    W3 -- weight matrix of shape (1, 3)
                    b3 -- bias vector of shape (1, 1)
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    A3 -- last activation value, output of the forward propagation, of shape (1, 1)
    cache -- tuple, information stored for computing the backward propagation
    """

    np.random.seed(1)

    # retrieve parameters
    W1 = parameters["W1"]
    b1 = parameters["b1"]
    W2 = parameters["W2"]
    b2 = parameters["b2"]
    W3 = parameters["W3"]
    b3 = parameters["b3"]

    # LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    ### START CODE HERE ### (approx. 4 lines)
    # Steps 1-4 below correspond to the Steps 1-4 described above.
    D1 = np.random.rand(A1.shape[0], A1.shape[1])  # Step 1: initialize matrix D1
    D1 = D1 < keep_prob                            # Step 2: convert entries of D1 to 0 or 1 (using keep_prob as the threshold)
    A1 = A1 * D1                                   # Step 3: shut down some neurons of A1
    A1 = A1 / keep_prob                            # Step 4: scale the value of neurons that haven't been shut down
    ### END CODE HERE ###
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    ### START CODE HERE ### (approx. 4 lines)
    D2 = np.random.rand(A2.shape[0], A2.shape[1])  # Step 1: initialize matrix D2
    D2 = D2 < keep_prob                            # Step 2: convert entries of D2 to 0 or 1 (using keep_prob as the threshold)
    A2 = A2 * D2                                   # Step 3: shut down some neurons of A2
    A2 = A2 / keep_prob                            # Step 4: scale the value of neurons that haven't been shut down
    ### END CODE HERE ###
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    cache = (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3)

    return A3, cache
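The mask-then-rescale pattern above ("inverted dropout") can be seen in isolation below; dividing by keep_prob keeps the expected value of the activations unchanged, so no compensation is needed at test time. The matrix A is an arbitrary stand-in:

import numpy as np

np.random.seed(1)
keep_prob = 0.5
A = np.random.randn(3, 4)                               # any activation matrix
D = np.random.rand(A.shape[0], A.shape[1]) < keep_prob  # boolean keep-mask
A = (A * D) / keep_prob                                 # drop roughly half the units, rescale the rest
print(A)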
Python
def backward_propagation_with_dropout(X, Y, cache, keep_prob):
    """
    Implements the backward propagation of our baseline model to which we added dropout.

    Arguments:
    X -- input dataset, of shape (2, number of examples)
    Y -- "true" labels vector, of shape (output size, number of examples)
    cache -- cache output from forward_propagation_with_dropout()
    keep_prob -- probability of keeping a neuron active during drop-out, scalar

    Returns:
    gradients -- A dictionary with the gradients with respect to each parameter, activation and pre-activation variables
    """

    m = X.shape[1]
    (Z1, D1, A1, W1, b1, Z2, D2, A2, W2, b2, Z3, A3, W3, b3) = cache

    dZ3 = A3 - Y
    dW3 = 1./m * np.dot(dZ3, A2.T)
    db3 = 1./m * np.sum(dZ3, axis=1, keepdims=True)
    dA2 = np.dot(W3.T, dZ3)
    ### START CODE HERE ### (≈ 2 lines of code)
    dA2 = dA2 * D2         # Step 1: Apply mask D2 to shut down the same neurons as during the forward propagation
    dA2 = dA2 / keep_prob  # Step 2: Scale the value of neurons that haven't been shut down
    ### END CODE HERE ###
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))  # ReLU
    dW2 = 1./m * np.dot(dZ2, A1.T)
    db2 = 1./m * np.sum(dZ2, axis=1, keepdims=True)

    dA1 = np.dot(W2.T, dZ2)
    ### START CODE HERE ### (≈ 2 lines of code)
    dA1 = dA1 * D1         # Step 1: Apply mask D1 to shut down the same neurons as during the forward propagation
    dA1 = dA1 / keep_prob  # Step 2: Scale the value of neurons that haven't been shut down
    ### END CODE HERE ###
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = 1./m * np.dot(dZ1, X.T)
    db1 = 1./m * np.sum(dZ1, axis=1, keepdims=True)

    gradients = {"dZ3": dZ3, "dW3": dW3, "db3": db3, "dA2": dA2,
                 "dZ2": dZ2, "dW2": dW2, "db2": db2, "dA1": dA1,
                 "dZ1": dZ1, "dW1": dW1, "db1": db1}

    return gradients
Python
def zero_pad(X, pad):
    """
    Pad with zeros all images of the dataset X. The padding is applied to the height and width of an image,
    as illustrated in Figure 1.

    Argument:
    X -- python numpy array of shape (m, n_H, n_W, n_C) representing a batch of m images
    pad -- integer, amount of padding around each image on vertical and horizontal dimensions

    Returns:
    X_pad -- padded image of shape (m, n_H + 2*pad, n_W + 2*pad, n_C)
    """

    ### START CODE HERE ### (≈ 1 line)
    X_pad = np.pad(X, ((0, 0), (pad, pad), (pad, pad), (0, 0)), 'constant', constant_values=0)
    ### END CODE HERE ###

    return X_pad
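A usage sketch showing that only the height and width dimensions grow, each by 2*pad:

import numpy as np

np.random.seed(1)
x = np.random.randn(4, 3, 3, 2)  # 4 images, 3x3 pixels, 2 channels
x_pad = zero_pad(x, 2)
print(x.shape, x_pad.shape)      # (4, 3, 3, 2) (4, 7, 7, 2)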
Python
def conv_single_step(a_slice_prev, W, b):
    """
    Apply one filter defined by parameters W on a single slice (a_slice_prev) of the output activation
    of the previous layer.

    Arguments:
    a_slice_prev -- slice of input data of shape (f, f, n_C_prev)
    W -- Weight parameters contained in a window - matrix of shape (f, f, n_C_prev)
    b -- Bias parameters contained in a window - matrix of shape (1, 1, 1)

    Returns:
    Z -- a scalar value, result of convolving the sliding window (W, b) on a slice x of the input data
    """

    ### START CODE HERE ### (≈ 2 lines of code)
    # Element-wise product between a_slice_prev and W. Do not add the bias yet.
    s = a_slice_prev * W
    # Sum over all entries of the volume s.
    Z = np.sum(s)
    # Add bias b to Z. Cast b to a float() so that Z results in a scalar value.
    Z = Z + float(b)
    ### END CODE HERE ###

    return Z
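A one-filter, one-position sketch with hypothetical 4x4x3 shapes; the result is a single scalar, i.e. one entry of the convolution's output volume:

import numpy as np

np.random.seed(1)
a_slice_prev = np.random.randn(4, 4, 3)  # one window of the previous activation
W = np.random.randn(4, 4, 3)             # one filter
b = np.random.randn(1, 1, 1)             # one bias
print(conv_single_step(a_slice_prev, W, b))  # a single float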
Python
def distribute_value(dz, shape):
    """
    Distributes the input value in the matrix of dimension shape

    Arguments:
    dz -- input scalar
    shape -- the shape (n_H, n_W) of the output matrix for which we want to distribute the value of dz

    Returns:
    a -- Array of size (n_H, n_W) for which we distributed the value of dz
    """

    ### START CODE HERE ###
    # Retrieve dimensions from shape (≈ 1 line)
    (n_H, n_W) = shape

    # Compute the value to distribute on the matrix (≈ 1 line)
    average = dz / (n_H * n_W)

    # Create a matrix where every entry is the "average" value (≈ 1 line)
    a = average * np.ones((n_H, n_W))
    ### END CODE HERE ###

    return a
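A hand-checkable example: spreading dz = 2.0 over a 2x2 window gives 0.5 per cell, which is exactly the average-pooling backward rule:

print(distribute_value(2.0, (2, 2)))
# [[0.5 0.5]
#  [0.5 0.5]]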
Python
def forward_propagation(X, parameters):
    """
    Implements the forward propagation for the model:
    CONV2D -> RELU -> MAXPOOL -> CONV2D -> RELU -> MAXPOOL -> FLATTEN -> FULLYCONNECTED

    Arguments:
    X -- input dataset placeholder, of shape (input size, number of examples)
    parameters -- python dictionary containing your parameters "W1", "W2"
                  the shapes are given in initialize_parameters

    Returns:
    Z3 -- the output of the last LINEAR unit
    """

    # Retrieve the parameters from the dictionary "parameters"
    W1 = parameters['W1']
    W2 = parameters['W2']

    ### START CODE HERE ###
    # CONV2D: stride of 1, padding 'SAME'
    Z1 = tf.nn.conv2d(X, W1, strides=[1, 1, 1, 1], padding='SAME')
    # RELU
    A1 = tf.nn.relu(Z1)
    # MAXPOOL: window 8x8, stride 8, padding 'SAME'
    P1 = tf.nn.max_pool(A1, ksize=[1, 8, 8, 1], strides=[1, 8, 8, 1], padding='SAME')
    # CONV2D: filters W2, stride 1, padding 'SAME'
    Z2 = tf.nn.conv2d(P1, W2, strides=[1, 1, 1, 1], padding='SAME')
    # RELU
    A2 = tf.nn.relu(Z2)
    # MAXPOOL: window 4x4, stride 4, padding 'SAME'
    P2 = tf.nn.max_pool(A2, ksize=[1, 4, 4, 1], strides=[1, 4, 4, 1], padding='SAME')
    # FLATTEN
    P2 = tf.contrib.layers.flatten(P2)
    # FULLY-CONNECTED without non-linear activation function (do not call softmax).
    # 6 neurons in output layer. Hint: one of the arguments should be "activation_fn=None"
    Z3 = tf.contrib.layers.fully_connected(P2, 6, activation_fn=None)
    ### END CODE HERE ###

    return Z3
Python
def decode_enrichment(dct): """Decode enrichment data feature from json.""" if 'path' in dct and 'field_mappings' in dct: return EnrichmentData(dct['path'], dct['field_mappings']) else: return dct
Python
def encode_enrichment(enrichment): """Encode enrichment data feature to json.""" if isinstance(enrichment, EnrichmentData): field_dict = enrichment.__dict__ return field_dict else: type_name = enrichment.__class__.__name__ raise TypeError('Object of type {} is not JSON serializable'.format(type_name))
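These two helpers are designed to plug into the json module's object_hook and default parameters. A minimal sketch, assuming an EnrichmentData class with the two attributes used above (the class definition and path below are hypothetical stand-ins; the real class lives elsewhere in the project):

import json

class EnrichmentData:  # hypothetical minimal container for illustration only
    def __init__(self, path, field_mappings):
        self.path = path
        self.field_mappings = field_mappings

raw = '{"path": "parcels.gdb/parcels", "field_mappings": [["OWNER", "owner"]]}'
feature = json.loads(raw, object_hook=decode_enrichment)  # -> EnrichmentData instance
print(feature.path)
print(json.dumps(feature, default=encode_enrichment))     # round-trips back to JSON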
Python
def multiple_enrichment(querylayer_points, globalid_field, enrichment_features):
    """Enrich points with fields from a list of feature classes."""
    enriched = querylayer_points
    accumulated_fields = [globalid_field]
    for feature in enrichment_features:
        old_enriched = enriched
        accumulated_fields.extend([f[0] for f in feature.field_mappings])
        enriched = get_enriched_points(
            enriched, feature.path, accumulated_fields)
        arcpy.Delete_management(old_enriched)
    return enriched