repository_name: string (lengths 5 to 67)
func_path_in_repository: string (lengths 4 to 234)
func_name: string (lengths 0 to 314)
whole_func_string: string (lengths 52 to 3.87M)
language: string (6 classes)
func_code_string: string (lengths 52 to 3.87M)
func_documentation_string: string (lengths 1 to 47.2k)
func_code_url: string (lengths 85 to 339)
facelessuser/soupsieve
soupsieve/util.py
lower
def lower(string): """Lower.""" new_string = [] for c in string: o = ord(c) new_string.append(chr(o + 32) if UC_A <= o <= UC_Z else c) return ''.join(new_string)
python
def lower(string): """Lower.""" new_string = [] for c in string: o = ord(c) new_string.append(chr(o + 32) if UC_A <= o <= UC_Z else c) return ''.join(new_string)
Lower.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/util.py#L45-L52
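A minimal standalone sketch of the ASCII-only lowering that lower() performs; the helper name ascii_lower is hypothetical, and UC_A/UC_Z are assumed to be ord('A') and ord('Z') as in soupsieve/util.py. Note that non-ASCII letters pass through unchanged:

    UC_A, UC_Z = ord('A'), ord('Z')

    def ascii_lower(string):
        # Lower only A-Z; anything else (including 'É') is left untouched.
        return ''.join(chr(ord(c) + 32) if UC_A <= ord(c) <= UC_Z else c for c in string)

    assert ascii_lower('HÉLLO-123') == 'hÉllo-123'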
facelessuser/soupsieve
soupsieve/util.py
upper
def upper(string): # pragma: no cover """Upper.""" new_string = [] for c in string: o = ord(c) new_string.append(chr(o - 32) if LC_A <= o <= LC_Z else c) return ''.join(new_string)
python
def upper(string): # pragma: no cover """Upper.""" new_string = [] for c in string: o = ord(c) new_string.append(chr(o - 32) if LC_A <= o <= LC_Z else c) return ''.join(new_string)
Upper.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/util.py#L55-L62
facelessuser/soupsieve
soupsieve/util.py
uord
def uord(c): """Get Unicode ordinal.""" if len(c) == 2: # pragma: no cover high, low = [ord(p) for p in c] ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000 else: ordinal = ord(c) return ordinal
python
def uord(c): """Get Unicode ordinal.""" if len(c) == 2: # pragma: no cover high, low = [ord(p) for p in c] ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000 else: ordinal = ord(c) return ordinal
Get Unicode ordinal.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/util.py#L74-L83
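The surrogate-pair arithmetic in uord can be checked by hand; a standalone worked example (the pair below is the UTF-16 encoding of U+1F600, the case a narrow build would hand to uord as two code units):

    high, low = 0xD83D, 0xDE00                       # UTF-16 surrogate pair for U+1F600
    ordinal = (high - 0xD800) * 0x400 + low - 0xDC00 + 0x10000
    assert ordinal == 0x1F600                        # 128512, ord('😀') on a wide build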
facelessuser/soupsieve
soupsieve/util.py
deprecated
def deprecated(message, stacklevel=2): # pragma: no cover """ Raise a `DeprecationWarning` when wrapped function/method is called. Borrowed from https://stackoverflow.com/a/48632082/866026 """ def _decorator(func): @wraps(func) def _func(*args, **kwargs): warnings.warn( "'{}' is deprecated. {}".format(func.__name__, message), category=DeprecationWarning, stacklevel=stacklevel ) return func(*args, **kwargs) return _func return _decorator
python
def deprecated(message, stacklevel=2): # pragma: no cover """ Raise a `DeprecationWarning` when wrapped function/method is called. Borrowed from https://stackoverflow.com/a/48632082/866026 """ def _decorator(func): @wraps(func) def _func(*args, **kwargs): warnings.warn( "'{}' is deprecated. {}".format(func.__name__, message), category=DeprecationWarning, stacklevel=stacklevel ) return func(*args, **kwargs) return _func return _decorator
Raise a `DeprecationWarning` when wrapped function/method is called. Borrowed from https://stackoverflow.com/a/48632082/866026
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/util.py#L104-L121
facelessuser/soupsieve
soupsieve/util.py
warn_deprecated
def warn_deprecated(message, stacklevel=2): # pragma: no cover """Warn deprecated.""" warnings.warn( message, category=DeprecationWarning, stacklevel=stacklevel )
python
def warn_deprecated(message, stacklevel=2): # pragma: no cover """Warn deprecated.""" warnings.warn( message, category=DeprecationWarning, stacklevel=stacklevel )
Warn deprecated.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/util.py#L124-L131
facelessuser/soupsieve
soupsieve/util.py
get_pattern_context
def get_pattern_context(pattern, index): """Get the pattern context.""" last = 0 current_line = 1 col = 1 text = [] line = 1 # Split pattern by newline and handle the text before the newline for m in RE_PATTERN_LINE_SPLIT.finditer(pattern): linetext = pattern[last:m.start(0)] if not len(m.group(0)) and not len(text): indent = '' offset = -1 col = index - last + 1 elif last <= index < m.end(0): indent = '--> ' offset = (-1 if index > m.start(0) else 0) + 3 col = index - last + 1 else: indent = ' ' offset = None if len(text): # Regardless of whether we are presented with `\r\n`, `\r`, or `\n`, # we will render the output with just `\n`. We will still log the column # correctly though. text.append('\n') text.append('{}{}'.format(indent, linetext)) if offset is not None: text.append('\n') text.append(' ' * (col + offset) + '^') line = current_line current_line += 1 last = m.end(0) return ''.join(text), line, col
python
def get_pattern_context(pattern, index): """Get the pattern context.""" last = 0 current_line = 1 col = 1 text = [] line = 1 # Split pattern by newline and handle the text before the newline for m in RE_PATTERN_LINE_SPLIT.finditer(pattern): linetext = pattern[last:m.start(0)] if not len(m.group(0)) and not len(text): indent = '' offset = -1 col = index - last + 1 elif last <= index < m.end(0): indent = '--> ' offset = (-1 if index > m.start(0) else 0) + 3 col = index - last + 1 else: indent = ' ' offset = None if len(text): # Regardless of whether we are presented with `\r\n`, `\r`, or `\n`, # we will render the output with just `\n`. We will still log the column # correctly though. text.append('\n') text.append('{}{}'.format(indent, linetext)) if offset is not None: text.append('\n') text.append(' ' * (col + offset) + '^') line = current_line current_line += 1 last = m.end(0) return ''.join(text), line, col
Get the pattern context.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/util.py#L134-L171
facelessuser/soupsieve
soupsieve/util.py
warn_quirks
def warn_quirks(message, recommend, pattern, index): """Warn quirks.""" import traceback import bs4 # noqa: F401 # Acquire source code line context paths = (MODULE, sys.modules['bs4'].__path__[0]) tb = traceback.extract_stack() previous = None filename = None lineno = None for entry in tb: if (PY35 and entry.filename.startswith(paths)) or (not PY35 and entry[0].startswith(paths)): break previous = entry if previous: filename = previous.filename if PY35 else previous[0] lineno = previous.lineno if PY35 else previous[1] # Format pattern to show line and column position context, line = get_pattern_context(pattern, index)[0:2] # Display warning warnings.warn_explicit( "\nCSS selector pattern:\n" + " {}\n".format(message) + " This behavior is only allowed temporarily for Beautiful Soup's transition to Soup Sieve.\n" + " In order to conform to the CSS spec, {}\n".format(recommend) + " It is strongly recommended the selector be altered to conform to the CSS spec " + "as an exception will be raised for this case in the future.\n" + "pattern line {}:\n{}".format(line, context), QuirksWarning, filename, lineno )
python
def warn_quirks(message, recommend, pattern, index): """Warn quirks.""" import traceback import bs4 # noqa: F401 # Acquire source code line context paths = (MODULE, sys.modules['bs4'].__path__[0]) tb = traceback.extract_stack() previous = None filename = None lineno = None for entry in tb: if (PY35 and entry.filename.startswith(paths)) or (not PY35 and entry[0].startswith(paths)): break previous = entry if previous: filename = previous.filename if PY35 else previous[0] lineno = previous.lineno if PY35 else previous[1] # Format pattern to show line and column position context, line = get_pattern_context(pattern, index)[0:2] # Display warning warnings.warn_explicit( "\nCSS selector pattern:\n" + " {}\n".format(message) + " This behavior is only allowed temporarily for Beautiful Soup's transition to Soup Sieve.\n" + " In order to conform to the CSS spec, {}\n".format(recommend) + " It is strongly recommended the selector be altered to conform to the CSS spec " + "as an exception will be raised for this case in the future.\n" + "pattern line {}:\n{}".format(line, context), QuirksWarning, filename, lineno )
Warn quirks.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/util.py#L178-L213
facelessuser/soupsieve
soupsieve/__init__.py
compile
def compile(pattern, namespaces=None, flags=0, **kwargs): # noqa: A001 """Compile CSS pattern.""" if namespaces is not None: namespaces = ct.Namespaces(**namespaces) custom = kwargs.get('custom') if custom is not None: custom = ct.CustomSelectors(**custom) if isinstance(pattern, SoupSieve): if flags: raise ValueError("Cannot process 'flags' argument on a compiled selector list") elif namespaces is not None: raise ValueError("Cannot process 'namespaces' argument on a compiled selector list") elif custom is not None: raise ValueError("Cannot process 'custom' argument on a compiled selector list") return pattern return cp._cached_css_compile(pattern, namespaces, custom, flags)
python
def compile(pattern, namespaces=None, flags=0, **kwargs): # noqa: A001 """Compile CSS pattern.""" if namespaces is not None: namespaces = ct.Namespaces(**namespaces) custom = kwargs.get('custom') if custom is not None: custom = ct.CustomSelectors(**custom) if isinstance(pattern, SoupSieve): if flags: raise ValueError("Cannot process 'flags' argument on a compiled selector list") elif namespaces is not None: raise ValueError("Cannot process 'namespaces' argument on a compiled selector list") elif custom is not None: raise ValueError("Cannot process 'custom' argument on a compiled selector list") return pattern return cp._cached_css_compile(pattern, namespaces, custom, flags)
Compile CSS pattern.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L44-L63
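A short usage sketch of the compiled form, assuming soupsieve and beautifulsoup4 are installed; compiling once lets the selector be reused without re-parsing:

    import soupsieve as sv
    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<div><p class="a">x</p><p>y</p></div>', 'html.parser')
    pattern = sv.compile('div > p.a')     # returns a SoupSieve object
    print(pattern.select(soup))           # [<p class="a">x</p>]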
facelessuser/soupsieve
soupsieve/__init__.py
closest
def closest(select, tag, namespaces=None, flags=0, **kwargs): """Match closest ancestor.""" return compile(select, namespaces, flags, **kwargs).closest(tag)
python
def closest(select, tag, namespaces=None, flags=0, **kwargs): """Match closest ancestor.""" return compile(select, namespaces, flags, **kwargs).closest(tag)
Match closest ancestor.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L72-L75
facelessuser/soupsieve
soupsieve/__init__.py
match
def match(select, tag, namespaces=None, flags=0, **kwargs): """Match node.""" return compile(select, namespaces, flags, **kwargs).match(tag)
python
def match(select, tag, namespaces=None, flags=0, **kwargs): """Match node.""" return compile(select, namespaces, flags, **kwargs).match(tag)
Match node.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L78-L81
facelessuser/soupsieve
soupsieve/__init__.py
filter
def filter(select, iterable, namespaces=None, flags=0, **kwargs): # noqa: A001 """Filter list of nodes.""" return compile(select, namespaces, flags, **kwargs).filter(iterable)
python
def filter(select, iterable, namespaces=None, flags=0, **kwargs): # noqa: A001 """Filter list of nodes.""" return compile(select, namespaces, flags, **kwargs).filter(iterable)
Filter list of nodes.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L84-L87
facelessuser/soupsieve
soupsieve/__init__.py
comments
def comments(tag, limit=0, flags=0, **kwargs): """Get comments only.""" return [comment for comment in cm.CommentsMatch(tag).get_comments(limit)]
python
def comments(tag, limit=0, flags=0, **kwargs): """Get comments only.""" return [comment for comment in cm.CommentsMatch(tag).get_comments(limit)]
Get comments only.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L91-L94
facelessuser/soupsieve
soupsieve/__init__.py
select_one
def select_one(select, tag, namespaces=None, flags=0, **kwargs): """Select a single tag.""" return compile(select, namespaces, flags, **kwargs).select_one(tag)
python
def select_one(select, tag, namespaces=None, flags=0, **kwargs): """Select a single tag.""" return compile(select, namespaces, flags, **kwargs).select_one(tag)
Select a single tag.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L105-L108
facelessuser/soupsieve
soupsieve/__init__.py
select
def select(select, tag, namespaces=None, limit=0, flags=0, **kwargs): """Select the specified tags.""" return compile(select, namespaces, flags, **kwargs).select(tag, limit)
python
def select(select, tag, namespaces=None, limit=0, flags=0, **kwargs): """Select the specified tags.""" return compile(select, namespaces, flags, **kwargs).select(tag, limit)
Select the specified tags.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L111-L114
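The module-level helpers above are thin wrappers that compile the selector and run it immediately; a sketch of select and match, assuming the same packages as before:

    import soupsieve as sv
    from bs4 import BeautifulSoup

    soup = BeautifulSoup('<div><p>1</p><p>2</p><p>3</p></div>', 'html.parser')
    print(sv.select('div > p', soup, limit=2))   # first two <p> tags only
    print(sv.match('div > p', soup.p))           # True for the first <p>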
facelessuser/soupsieve
soupsieve/__init__.py
iselect
def iselect(select, tag, namespaces=None, limit=0, flags=0, **kwargs): """Iterate the specified tags.""" for el in compile(select, namespaces, flags, **kwargs).iselect(tag, limit): yield el
python
def iselect(select, tag, namespaces=None, limit=0, flags=0, **kwargs): """Iterate the specified tags.""" for el in compile(select, namespaces, flags, **kwargs).iselect(tag, limit): yield el
Iterate the specified tags.
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/__init__.py#L117-L121
bigcommerce/bigcommerce-api-python
bigcommerce/resources/base.py
ListableApiResource.all
def all(cls, connection=None, **params): """ Returns the first page, as a list, if no params are passed in. """ request = cls._make_request('GET', cls._get_all_path(), connection, params=params) return cls._create_object(request, connection=connection)
python
def all(cls, connection=None, **params): """ Returns the first page, as a list, if no params are passed in. """ request = cls._make_request('GET', cls._get_all_path(), connection, params=params) return cls._create_object(request, connection=connection)
Returns the first page, as a list, if no params are passed in.
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/resources/base.py#L102-L108
bigcommerce/bigcommerce-api-python
bigcommerce/resources/base.py
ListableApiResource.iterall
def iterall(cls, connection=None, **kwargs): """ Returns an autopaging generator that yields each object returned one by one. """ try: limit = kwargs['limit'] except KeyError: limit = None try: page = kwargs['page'] except KeyError: page = None def _all_responses(): page = 1 # one based params = kwargs.copy() while True: params.update(page=page, limit=250) rsp = cls._make_request('GET', cls._get_all_path(), connection, params=params) if rsp: yield rsp page += 1 else: yield [] # needed for the case where there are no objects break if not (limit or page): for rsp in _all_responses(): for obj in rsp: yield cls._create_object(obj, connection=connection) else: response = cls._make_request('GET', cls._get_all_path(), connection, params=kwargs) for obj in cls._create_object(response, connection=connection): yield obj
python
def iterall(cls, connection=None, **kwargs): """ Returns an autopaging generator that yields each object returned one by one. """ try: limit = kwargs['limit'] except KeyError: limit = None try: page = kwargs['page'] except KeyError: page = None def _all_responses(): page = 1 # one based params = kwargs.copy() while True: params.update(page=page, limit=250) rsp = cls._make_request('GET', cls._get_all_path(), connection, params=params) if rsp: yield rsp page += 1 else: yield [] # needed for the case where there are no objects break if not (limit or page): for rsp in _all_responses(): for obj in rsp: yield cls._create_object(obj, connection=connection) else: response = cls._make_request('GET', cls._get_all_path(), connection, params=kwargs) for obj in cls._create_object(response, connection=connection): yield obj
Returns an autopaging generator that yields each object returned one by one.
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/resources/base.py#L111-L148
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
Connection.get
def get(self, resource="", rid=None, **query): """ Retrieves the resource with given id 'rid', or all resources of given type. Keep in mind that the API returns a list for any query that doesn't specify an ID, even when applying a limit=1 filter. Also be aware that float values tend to come back as strings ("2.0000" instead of 2.0) Keyword arguments can be parsed for filtering the query, for example: connection.get('products', limit=3, min_price=10.5) (see Bigcommerce resource documentation). """ if rid: if resource[-1] != '/': resource += '/' resource += str(rid) response = self._run_method('GET', resource, query=query) return self._handle_response(resource, response)
python
def get(self, resource="", rid=None, **query): """ Retrieves the resource with given id 'rid', or all resources of given type. Keep in mind that the API returns a list for any query that doesn't specify an ID, even when applying a limit=1 filter. Also be aware that float values tend to come back as strings ("2.0000" instead of 2.0) Keyword arguments can be parsed for filtering the query, for example: connection.get('products', limit=3, min_price=10.5) (see Bigcommerce resource documentation). """ if rid: if resource[-1] != '/': resource += '/' resource += str(rid) response = self._run_method('GET', resource, query=query) return self._handle_response(resource, response)
Retrieves the resource with given id 'rid', or all resources of given type. Keep in mind that the API returns a list for any query that doesn't specify an ID, even when applying a limit=1 filter. Also be aware that float values tend to come back as strings ("2.0000" instead of 2.0) Keyword arguments can be parsed for filtering the query, for example: connection.get('products', limit=3, min_price=10.5) (see Bigcommerce resource documentation).
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L83-L99
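A usage sketch of the two call forms described above; `connection` is assumed to be an already-authenticated Connection instance, so this is illustrative rather than runnable as-is:

    product = connection.get('products', 42)                        # single resource by id
    cheapest = connection.get('products', limit=3, min_price=10.5)  # filtered query, always a list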
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
Connection.update
def update(self, resource, rid, updates): """ Updates the resource with id 'rid' with the given updates dictionary. """ if resource[-1] != '/': resource += '/' resource += str(rid) return self.put(resource, data=updates)
python
def update(self, resource, rid, updates): """ Updates the resource with id 'rid' with the given updates dictionary. """ if resource[-1] != '/': resource += '/' resource += str(rid) return self.put(resource, data=updates)
Updates the resource with id 'rid' with the given updates dictionary.
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L101-L108
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
Connection.delete
def delete(self, resource, rid=None): # note that rid can't be 0 - problem? """ Deletes the resource with given id 'rid', or all resources of given type if rid is not supplied. """ if rid: if resource[-1] != '/': resource += '/' resource += str(rid) response = self._run_method('DELETE', resource) return self._handle_response(resource, response, suppress_empty=True)
python
def delete(self, resource, rid=None): # note that rid can't be 0 - problem? """ Deletes the resource with given id 'rid', or all resources of given type if rid is not supplied. """ if rid: if resource[-1] != '/': resource += '/' resource += str(rid) response = self._run_method('DELETE', resource) return self._handle_response(resource, response, suppress_empty=True)
Deletes the resource with given id 'rid', or all resources of given type if rid is not supplied.
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L116-L125
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
Connection.put
def put(self, url, data): """ Make a PUT request to save data. data should be a dictionary. """ response = self._run_method('PUT', url, data=data) log.debug("OUTPUT: %s" % response.content) return self._handle_response(url, response)
python
def put(self, url, data): """ Make a PUT request to save data. data should be a dictionary. """ response = self._run_method('PUT', url, data=data) log.debug("OUTPUT: %s" % response.content) return self._handle_response(url, response)
Make a PUT request to save data. data should be a dictionary.
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L133-L140
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
Connection.post
def post(self, url, data, headers={}): """ POST request for creating new objects. data should be a dictionary. """ response = self._run_method('POST', url, data=data, headers=headers) return self._handle_response(url, response)
python
def post(self, url, data, headers={}): """ POST request for creating new objects. data should be a dictionary. """ response = self._run_method('POST', url, data=data, headers=headers) return self._handle_response(url, response)
POST request for creating new objects. data should be a dictionary.
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L142-L148
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
Connection._handle_response
def _handle_response(self, url, res, suppress_empty=True): """ Returns parsed JSON or raises an exception appropriately. """ self._last_response = res result = {} if res.status_code in (200, 201, 202): try: result = res.json() except Exception as e: # json might be invalid, or store might be down e.message += " (_handle_response failed to decode JSON: " + str(res.content) + ")" raise # TODO better exception elif res.status_code == 204 and not suppress_empty: raise EmptyResponseWarning("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) elif res.status_code >= 500: raise ServerException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) elif res.status_code == 429: raise RateLimitingException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) elif res.status_code >= 400: raise ClientRequestException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) elif res.status_code >= 300: raise RedirectionException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) return result
python
def _handle_response(self, url, res, suppress_empty=True): """ Returns parsed JSON or raises an exception appropriately. """ self._last_response = res result = {} if res.status_code in (200, 201, 202): try: result = res.json() except Exception as e: # json might be invalid, or store might be down e.message += " (_handle_response failed to decode JSON: " + str(res.content) + ")" raise # TODO better exception elif res.status_code == 204 and not suppress_empty: raise EmptyResponseWarning("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) elif res.status_code >= 500: raise ServerException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) elif res.status_code == 429: raise RateLimitingException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) elif res.status_code >= 400: raise ClientRequestException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) elif res.status_code >= 300: raise RedirectionException("%d %s @ %s: %s" % (res.status_code, res.reason, url, res.content), res) return result
Returns parsed JSON or raises an exception appropriately.
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L150-L172
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
OAuthConnection.verify_payload
def verify_payload(signed_payload, client_secret): """ Given a signed payload (usually passed as parameter in a GET request to the app's load URL) and a client secret, authenticates the payload and returns the user's data, or False on fail. Uses constant-time str comparison to prevent vulnerability to timing attacks. """ encoded_json, encoded_hmac = signed_payload.split('.') dc_json = base64.b64decode(encoded_json) signature = base64.b64decode(encoded_hmac) expected_sig = hmac.new(client_secret.encode(), base64.b64decode(encoded_json), hashlib.sha256).hexdigest() authorised = hmac.compare_digest(signature, expected_sig.encode()) return json.loads(dc_json.decode()) if authorised else False
python
def verify_payload(signed_payload, client_secret): """ Given a signed payload (usually passed as parameter in a GET request to the app's load URL) and a client secret, authenticates the payload and returns the user's data, or False on fail. Uses constant-time str comparison to prevent vulnerability to timing attacks. """ encoded_json, encoded_hmac = signed_payload.split('.') dc_json = base64.b64decode(encoded_json) signature = base64.b64decode(encoded_hmac) expected_sig = hmac.new(client_secret.encode(), base64.b64decode(encoded_json), hashlib.sha256).hexdigest() authorised = hmac.compare_digest(signature, expected_sig.encode()) return json.loads(dc_json.decode()) if authorised else False
Given a signed payload (usually passed as parameter in a GET request to the app's load URL) and a client secret, authenticates the payload and returns the user's data, or False on fail. Uses constant-time str comparison to prevent vulnerability to timing attacks.
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L217-L229
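A sketch of how the verification step is typically used when handling the app's load callback; `signed_payload` and `client_secret` are assumed to come from the incoming request and the app configuration respectively:

    user_data = OAuthConnection.verify_payload(signed_payload, client_secret)
    if user_data is False:
        # Signature mismatch: treat the request as unauthenticated.
        raise PermissionError('signed_payload failed verification')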
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
OAuthConnection.fetch_token
def fetch_token(self, client_secret, code, context, scope, redirect_uri, token_url='https://login.bigcommerce.com/oauth2/token'): """ Fetches a token from given token_url, using given parameters, and sets up session headers for future requests. redirect_uri should be the same as your callback URL. code, context, and scope should be passed as parameters to your callback URL on app installation. Raises HttpException on failure (same as Connection methods). """ res = self.post(token_url, {'client_id': self.client_id, 'client_secret': client_secret, 'code': code, 'context': context, 'scope': scope, 'grant_type': 'authorization_code', 'redirect_uri': redirect_uri}, headers={'Content-Type': 'application/x-www-form-urlencoded'}) self._session.headers.update(self._oauth_headers(self.client_id, res['access_token'])) return res
python
def fetch_token(self, client_secret, code, context, scope, redirect_uri, token_url='https://login.bigcommerce.com/oauth2/token'): """ Fetches a token from given token_url, using given parameters, and sets up session headers for future requests. redirect_uri should be the same as your callback URL. code, context, and scope should be passed as parameters to your callback URL on app installation. Raises HttpException on failure (same as Connection methods). """ res = self.post(token_url, {'client_id': self.client_id, 'client_secret': client_secret, 'code': code, 'context': context, 'scope': scope, 'grant_type': 'authorization_code', 'redirect_uri': redirect_uri}, headers={'Content-Type': 'application/x-www-form-urlencoded'}) self._session.headers.update(self._oauth_headers(self.client_id, res['access_token'])) return res
Fetches a token from given token_url, using given parameters, and sets up session headers for future requests. redirect_uri should be the same as your callback URL. code, context, and scope should be passed as parameters to your callback URL on app installation. Raises HttpException on failure (same as Connection methods).
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L231-L250
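A sketch of the token exchange performed during app installation; `oauth_conn` is assumed to be an OAuthConnection built with the app's client id and the store hash, and `code`, `context`, and `scope` come from the install callback request:

    token = oauth_conn.fetch_token(
        CLIENT_SECRET, code, context, scope,
        redirect_uri='https://example.com/auth/callback',
    )
    # On success the session headers are set up; token['access_token'] can also be persisted.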
bigcommerce/bigcommerce-api-python
bigcommerce/connection.py
OAuthConnection._handle_response
def _handle_response(self, url, res, suppress_empty=True): """ Adds rate limiting information on to the response object """ result = Connection._handle_response(self, url, res, suppress_empty) if 'X-Rate-Limit-Time-Reset-Ms' in res.headers: self.rate_limit = dict(ms_until_reset=int(res.headers['X-Rate-Limit-Time-Reset-Ms']), window_size_ms=int(res.headers['X-Rate-Limit-Time-Window-Ms']), requests_remaining=int(res.headers['X-Rate-Limit-Requests-Left']), requests_quota=int(res.headers['X-Rate-Limit-Requests-Quota'])) if self.rate_limiting_management: if self.rate_limiting_management['min_requests_remaining'] >= self.rate_limit['requests_remaining']: if self.rate_limiting_management['wait']: sleep(ceil(float(self.rate_limit['ms_until_reset']) / 1000)) if self.rate_limiting_management.get('callback_function'): callback = self.rate_limiting_management['callback_function'] args_dict = self.rate_limiting_management.get('callback_args') if args_dict: callback(args_dict) else: callback() return result
python
def _handle_response(self, url, res, suppress_empty=True): """ Adds rate limiting information on to the response object """ result = Connection._handle_response(self, url, res, suppress_empty) if 'X-Rate-Limit-Time-Reset-Ms' in res.headers: self.rate_limit = dict(ms_until_reset=int(res.headers['X-Rate-Limit-Time-Reset-Ms']), window_size_ms=int(res.headers['X-Rate-Limit-Time-Window-Ms']), requests_remaining=int(res.headers['X-Rate-Limit-Requests-Left']), requests_quota=int(res.headers['X-Rate-Limit-Requests-Quota'])) if self.rate_limiting_management: if self.rate_limiting_management['min_requests_remaining'] >= self.rate_limit['requests_remaining']: if self.rate_limiting_management['wait']: sleep(ceil(float(self.rate_limit['ms_until_reset']) / 1000)) if self.rate_limiting_management.get('callback_function'): callback = self.rate_limiting_management['callback_function'] args_dict = self.rate_limiting_management.get('callback_args') if args_dict: callback(args_dict) else: callback() return result
Adds rate limiting information on to the response object
https://github.com/bigcommerce/bigcommerce-api-python/blob/76a8f5d59fd44a4365f14a5959102e118cf35dee/bigcommerce/connection.py#L252-L274
FSX/misaka
misaka/api.py
escape_html
def escape_html(text, escape_slash=False): """ Binding for Hoedown's HTML escaping function. The implementation is inspired by the OWASP XSS Prevention recommendations: .. code-block:: none & --> &amp; < --> &lt; > --> &gt; " --> &quot; ' --> &#x27; / --> &#x2F; when escape_slash is set to True .. versionadded:: 2.1.0 """ byte_str = text.encode('utf-8') ob = lib.hoedown_buffer_new(OUNIT) lib.hoedown_escape_html(ob, byte_str, len(byte_str), int(escape_slash)) try: return to_string(ob) finally: lib.hoedown_buffer_free(ob)
python
def escape_html(text, escape_slash=False): """ Binding for Hoedown's HTML escaping function. The implementation is inspired by the OWASP XSS Prevention recommendations: .. code-block:: none & --> &amp; < --> &lt; > --> &gt; " --> &quot; ' --> &#x27; / --> &#x2F; when escape_slash is set to True .. versionadded:: 2.1.0 """ byte_str = text.encode('utf-8') ob = lib.hoedown_buffer_new(OUNIT) lib.hoedown_escape_html(ob, byte_str, len(byte_str), int(escape_slash)) try: return to_string(ob) finally: lib.hoedown_buffer_free(ob)
Binding for Hoedown's HTML escaping function. The implementation is inspired by the OWASP XSS Prevention recommendations: .. code-block:: none & --> &amp; < --> &lt; > --> &gt; " --> &quot; ' --> &#x27; / --> &#x2F; when escape_slash is set to True .. versionadded:: 2.1.0
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L69-L93
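A quick example of the escaping behaviour listed above, assuming the misaka package is installed:

    import misaka

    print(misaka.escape_html('<a href="/">link</a>', escape_slash=True))
    # &lt;a href=&quot;&#x2F;&quot;&gt;link&lt;&#x2F;a&gt;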
FSX/misaka
misaka/api.py
html
def html(text, extensions=0, render_flags=0): """ Convert markdown text to HTML. ``extensions`` can be a list or tuple of extensions (e.g. ``('fenced-code', 'footnotes', 'strikethrough')``) or an integer (e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``). ``render_flags`` can be a list or tuple of flags (e.g. ``('skip-html', 'hard-wrap')``) or an integer (e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``). """ extensions = args_to_int(extension_map, extensions) render_flags = args_to_int(html_flag_map, render_flags) ib = lib.hoedown_buffer_new(IUNIT) ob = lib.hoedown_buffer_new(OUNIT) renderer = lib.hoedown_html_renderer_new(render_flags, 0) document = lib.hoedown_document_new(renderer, extensions, 16); lib.hoedown_buffer_puts(ib, text.encode('utf-8')) lib.hoedown_document_render(document, ob, ib.data, ib.size); lib.hoedown_buffer_free(ib); lib.hoedown_document_free(document); lib.hoedown_html_renderer_free(renderer); try: return to_string(ob) finally: lib.hoedown_buffer_free(ob);
python
def html(text, extensions=0, render_flags=0): """ Convert markdown text to HTML. ``extensions`` can be a list or tuple of extensions (e.g. ``('fenced-code', 'footnotes', 'strikethrough')``) or an integer (e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``). ``render_flags`` can be a list or tuple of flags (e.g. ``('skip-html', 'hard-wrap')``) or an integer (e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``). """ extensions = args_to_int(extension_map, extensions) render_flags = args_to_int(html_flag_map, render_flags) ib = lib.hoedown_buffer_new(IUNIT) ob = lib.hoedown_buffer_new(OUNIT) renderer = lib.hoedown_html_renderer_new(render_flags, 0) document = lib.hoedown_document_new(renderer, extensions, 16); lib.hoedown_buffer_puts(ib, text.encode('utf-8')) lib.hoedown_document_render(document, ob, ib.data, ib.size); lib.hoedown_buffer_free(ib); lib.hoedown_document_free(document); lib.hoedown_html_renderer_free(renderer); try: return to_string(ob) finally: lib.hoedown_buffer_free(ob);
Convert markdown text to HTML. ``extensions`` can be a list or tuple of extensions (e.g. ``('fenced-code', 'footnotes', 'strikethrough')``) or an integer (e.g. ``EXT_FENCED_CODE | EXT_FOOTNOTES | EXT_STRIKETHROUGH``). ``render_flags`` can be a list or tuple of flags (e.g. ``('skip-html', 'hard-wrap')``) or an integer (e.g. ``HTML_SKIP_HTML | HTML_HARD_WRAP``).
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L96-L125
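A usage sketch, assuming misaka is installed; extensions are given as a tuple of strings per the documentation above:

    import misaka

    text = 'Hello ~~old~~ *new* world'
    print(misaka.html(text, extensions=('strikethrough',)))
    # <p>Hello <del>old</del> <em>new</em> world</p>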
FSX/misaka
misaka/api.py
smartypants
def smartypants(text): """ Transforms sequences of characters into HTML entities. =================================== ===================== ========= Markdown HTML Result =================================== ===================== ========= ``'s`` (s, t, m, d, re, ll, ve) &rsquo;s ’s ``"Quotes"`` &ldquo;Quotes&rdquo; “Quotes” ``---`` &mdash; — ``--`` &ndash; – ``...`` &hellip; … ``. . .`` &hellip; … ``(c)`` &copy; © ``(r)`` &reg; ® ``(tm)`` &trade; ™ ``3/4`` &frac34; ¾ ``1/2`` &frac12; ½ ``1/4`` &frac14; ¼ =================================== ===================== ========= """ byte_str = text.encode('utf-8') ob = lib.hoedown_buffer_new(OUNIT) lib.hoedown_html_smartypants(ob, byte_str, len(byte_str)) try: return to_string(ob) finally: lib.hoedown_buffer_free(ob);
python
def smartypants(text): """ Transforms sequences of characters into HTML entities. =================================== ===================== ========= Markdown HTML Result =================================== ===================== ========= ``'s`` (s, t, m, d, re, ll, ve) &rsquo;s ’s ``"Quotes"`` &ldquo;Quotes&rdquo; “Quotes” ``---`` &mdash; — ``--`` &ndash; – ``...`` &hellip; … ``. . .`` &hellip; … ``(c)`` &copy; © ``(r)`` &reg; ® ``(tm)`` &trade; ™ ``3/4`` &frac34; ¾ ``1/2`` &frac12; ½ ``1/4`` &frac14; ¼ =================================== ===================== ========= """ byte_str = text.encode('utf-8') ob = lib.hoedown_buffer_new(OUNIT) lib.hoedown_html_smartypants(ob, byte_str, len(byte_str)) try: return to_string(ob) finally: lib.hoedown_buffer_free(ob);
Transforms sequences of characters into HTML entities. =================================== ===================== ========= Markdown HTML Result =================================== ===================== ========= ``'s`` (s, t, m, d, re, ll, ve) &rsquo;s ’s ``"Quotes"`` &ldquo;Quotes&rdquo; “Quotes” ``---`` &mdash; — ``--`` &ndash; – ``...`` &hellip; … ``. . .`` &hellip; … ``(c)`` &copy; © ``(r)`` &reg; ® ``(tm)`` &trade; ™ ``3/4`` &frac34; ¾ ``1/2`` &frac12; ½ ``1/4`` &frac14; ¼ =================================== ===================== =========
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L128-L156
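For example (assuming misaka is installed):

    import misaka

    print(misaka.smartypants('"Quotes" -- and more...'))
    # &ldquo;Quotes&rdquo; &ndash; and more&hellip;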
FSX/misaka
misaka/api.py
SaferHtmlRenderer.autolink
def autolink(self, raw_url, is_email): """ Filters links generated by the ``autolink`` extension. """ if self.check_url(raw_url): url = self.rewrite_url(('mailto:' if is_email else '') + raw_url) url = escape_html(url) return '<a href="%s">%s</a>' % (url, escape_html(raw_url)) else: return escape_html('<%s>' % raw_url)
python
def autolink(self, raw_url, is_email): """ Filters links generated by the ``autolink`` extension. """ if self.check_url(raw_url): url = self.rewrite_url(('mailto:' if is_email else '') + raw_url) url = escape_html(url) return '<a href="%s">%s</a>' % (url, escape_html(raw_url)) else: return escape_html('<%s>' % raw_url)
Filters links generated by the ``autolink`` extension.
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L304-L313
FSX/misaka
misaka/api.py
SaferHtmlRenderer.image
def image(self, raw_url, title='', alt=''): """ Filters the ``src`` attribute of an image. Note that filtering the source URL of an ``<img>`` tag is only a very basic protection, and it's mostly useless in modern browsers (they block JavaScript in there by default). An example of attack that filtering does not thwart is phishing based on HTTP Auth, see `this issue <https://github.com/liberapay/liberapay.com/issues/504>`_ for details. To mitigate this issue you should only allow images from trusted services, for example your own image store, or a proxy (see :meth:`rewrite_url`). """ if self.check_url(raw_url, is_image_src=True): url = self.rewrite_url(raw_url, is_image_src=True) maybe_alt = ' alt="%s"' % escape_html(alt) if alt else '' maybe_title = ' title="%s"' % escape_html(title) if title else '' url = escape_html(url) return '<img src="%s"%s%s />' % (url, maybe_alt, maybe_title) else: return escape_html("![%s](%s)" % (alt, raw_url))
python
def image(self, raw_url, title='', alt=''): """ Filters the ``src`` attribute of an image. Note that filtering the source URL of an ``<img>`` tag is only a very basic protection, and it's mostly useless in modern browsers (they block JavaScript in there by default). An example of attack that filtering does not thwart is phishing based on HTTP Auth, see `this issue <https://github.com/liberapay/liberapay.com/issues/504>`_ for details. To mitigate this issue you should only allow images from trusted services, for example your own image store, or a proxy (see :meth:`rewrite_url`). """ if self.check_url(raw_url, is_image_src=True): url = self.rewrite_url(raw_url, is_image_src=True) maybe_alt = ' alt="%s"' % escape_html(alt) if alt else '' maybe_title = ' title="%s"' % escape_html(title) if title else '' url = escape_html(url) return '<img src="%s"%s%s />' % (url, maybe_alt, maybe_title) else: return escape_html("![%s](%s)" % (alt, raw_url))
Filters the ``src`` attribute of an image. Note that filtering the source URL of an ``<img>`` tag is only a very basic protection, and it's mostly useless in modern browsers (they block JavaScript in there by default). An example of attack that filtering does not thwart is phishing based on HTTP Auth, see `this issue <https://github.com/liberapay/liberapay.com/issues/504>`_ for details. To mitigate this issue you should only allow images from trusted services, for example your own image store, or a proxy (see :meth:`rewrite_url`).
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L315-L335
FSX/misaka
misaka/api.py
SaferHtmlRenderer.link
def link(self, content, raw_url, title=''): """ Filters links. """ if self.check_url(raw_url): url = self.rewrite_url(raw_url) maybe_title = ' title="%s"' % escape_html(title) if title else '' url = escape_html(url) return ('<a href="%s"%s>' % (url, maybe_title)) + content + '</a>' else: return escape_html("[%s](%s)" % (content, raw_url))
python
def link(self, content, raw_url, title=''): """ Filters links. """ if self.check_url(raw_url): url = self.rewrite_url(raw_url) maybe_title = ' title="%s"' % escape_html(title) if title else '' url = escape_html(url) return ('<a href="%s"%s>' % (url, maybe_title)) + content + '</a>' else: return escape_html("[%s](%s)" % (content, raw_url))
Filters links.
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L337-L347
FSX/misaka
misaka/api.py
SaferHtmlRenderer.check_url
def check_url(self, url, is_image_src=False): """ This method is used to check a URL. Returns :obj:`True` if the URL is "safe", :obj:`False` otherwise. The default implementation only allows HTTP and HTTPS links. That means no ``mailto:``, no ``xmpp:``, no ``ftp:``, etc. This method exists specifically to allow easy customization of link filtering through subclassing, so don't hesitate to write your own. If you're thinking of implementing a blacklist approach, see "`Which URL schemes are dangerous (XSS exploitable)? <http://security.stackexchange.com/q/148428/37409>`_". """ return bool(self._allowed_url_re.match(url))
python
def check_url(self, url, is_image_src=False): """ This method is used to check a URL. Returns :obj:`True` if the URL is "safe", :obj:`False` otherwise. The default implementation only allows HTTP and HTTPS links. That means no ``mailto:``, no ``xmpp:``, no ``ftp:``, etc. This method exists specifically to allow easy customization of link filtering through subclassing, so don't hesitate to write your own. If you're thinking of implementing a blacklist approach, see "`Which URL schemes are dangerous (XSS exploitable)? <http://security.stackexchange.com/q/148428/37409>`_". """ return bool(self._allowed_url_re.match(url))
This method is used to check a URL. Returns :obj:`True` if the URL is "safe", :obj:`False` otherwise. The default implementation only allows HTTP and HTTPS links. That means no ``mailto:``, no ``xmpp:``, no ``ftp:``, etc. This method exists specifically to allow easy customization of link filtering through subclassing, so don't hesitate to write your own. If you're thinking of implementing a blacklist approach, see "`Which URL schemes are dangerous (XSS exploitable)? <http://security.stackexchange.com/q/148428/37409>`_".
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L349-L365
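A sketch of the subclassing approach the docstring recommends: this hypothetical renderer also accepts mailto: links while keeping the default http(s)-only rule for everything else, assuming misaka's SaferHtmlRenderer and Markdown APIs:

    import misaka

    class MailtoFriendlyRenderer(misaka.SaferHtmlRenderer):
        def check_url(self, url, is_image_src=False):
            # Allow mailto: for links (not image sources); otherwise defer to the default rule.
            if not is_image_src and url.startswith('mailto:'):
                return True
            return super().check_url(url, is_image_src)

    render = misaka.Markdown(MailtoFriendlyRenderer(), extensions=('autolink',))
    print(render('Contact <mailto:someone@example.com>'))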
FSX/misaka
misaka/api.py
SaferHtmlRenderer.rewrite_url
def rewrite_url(self, url, is_image_src=False): """ This method is called to rewrite URLs. It uses either ``self.link_rewrite`` or ``self.img_src_rewrite`` depending on the value of ``is_image_src``. The URL is returned unchanged if the corresponding attribute is :obj:`None`. """ rewrite = self.img_src_rewrite if is_image_src else self.link_rewrite if rewrite: return rewrite.format(url=urlquote(url)) return url
python
def rewrite_url(self, url, is_image_src=False): """ This method is called to rewrite URLs. It uses either ``self.link_rewrite`` or ``self.img_src_rewrite`` depending on the value of ``is_image_src``. The URL is returned unchanged if the corresponding attribute is :obj:`None`. """ rewrite = self.img_src_rewrite if is_image_src else self.link_rewrite if rewrite: return rewrite.format(url=urlquote(url)) return url
This method is called to rewrite URLs. It uses either ``self.link_rewrite`` or ``self.img_src_rewrite`` depending on the value of ``is_image_src``. The URL is returned unchanged if the corresponding attribute is :obj:`None`.
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/api.py#L367-L378
FSX/misaka
misaka/utils.py
args_to_int
def args_to_int(mapping, argument): """ Convert list of strings to an int using a mapping. """ if isinstance(argument, int): if argument == 0: return 0 deprecation('passing extensions and flags as constants is deprecated') return argument elif isinstance(argument, (tuple, list)): return reduce(op.or_, [mapping[n] for n in set(argument) if n in mapping], 0) raise TypeError('argument must be a list of strings or an int')
python
def args_to_int(mapping, argument): """ Convert list of strings to an int using a mapping. """ if isinstance(argument, int): if argument == 0: return 0 deprecation('passing extensions and flags as constants is deprecated') return argument elif isinstance(argument, (tuple, list)): return reduce(op.or_, [mapping[n] for n in set(argument) if n in mapping], 0) raise TypeError('argument must be a list of strings or an int')
Convert list of strings to an int using a mapping.
https://github.com/FSX/misaka/blob/c13aff82d370d606ff61361db79c166a897641cf/misaka/utils.py#L40-L51
vsergeev/u-msgpack-python
umsgpack.py
_pack3
def _pack3(obj, fp, **options): """ Serialize a Python object into MessagePack bytes. Args: obj: a Python object fp: a .write()-supporting file-like object Kwargs: ext_handlers (dict): dictionary of Ext handlers, mapping a custom type to a callable that packs an instance of the type into an Ext object force_float_precision (str): "single" to force packing floats as IEEE-754 single-precision floats, "double" to force packing floats as IEEE-754 double-precision floats. Returns: None. Raises: UnsupportedType(PackException): Object type not supported for packing. Example: >>> f = open('test.bin', 'wb') >>> umsgpack.pack({u"compact": True, u"schema": 0}, f) >>> """ global compatibility ext_handlers = options.get("ext_handlers") if obj is None: _pack_nil(obj, fp, options) elif ext_handlers and obj.__class__ in ext_handlers: _pack_ext(ext_handlers[obj.__class__](obj), fp, options) elif isinstance(obj, bool): _pack_boolean(obj, fp, options) elif isinstance(obj, int): _pack_integer(obj, fp, options) elif isinstance(obj, float): _pack_float(obj, fp, options) elif compatibility and isinstance(obj, str): _pack_oldspec_raw(obj.encode('utf-8'), fp, options) elif compatibility and isinstance(obj, bytes): _pack_oldspec_raw(obj, fp, options) elif isinstance(obj, str): _pack_string(obj, fp, options) elif isinstance(obj, bytes): _pack_binary(obj, fp, options) elif isinstance(obj, (list, tuple)): _pack_array(obj, fp, options) elif isinstance(obj, dict): _pack_map(obj, fp, options) elif isinstance(obj, datetime.datetime): _pack_ext_timestamp(obj, fp, options) elif isinstance(obj, Ext): _pack_ext(obj, fp, options) elif ext_handlers: # Linear search for superclass t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None) if t: _pack_ext(ext_handlers[t](obj), fp, options) else: raise UnsupportedTypeException( "unsupported type: %s" % str(type(obj))) else: raise UnsupportedTypeException( "unsupported type: %s" % str(type(obj)))
python
def _pack3(obj, fp, **options): """ Serialize a Python object into MessagePack bytes. Args: obj: a Python object fp: a .write()-supporting file-like object Kwargs: ext_handlers (dict): dictionary of Ext handlers, mapping a custom type to a callable that packs an instance of the type into an Ext object force_float_precision (str): "single" to force packing floats as IEEE-754 single-precision floats, "double" to force packing floats as IEEE-754 double-precision floats. Returns: None. Raises: UnsupportedType(PackException): Object type not supported for packing. Example: >>> f = open('test.bin', 'wb') >>> umsgpack.pack({u"compact": True, u"schema": 0}, f) >>> """ global compatibility ext_handlers = options.get("ext_handlers") if obj is None: _pack_nil(obj, fp, options) elif ext_handlers and obj.__class__ in ext_handlers: _pack_ext(ext_handlers[obj.__class__](obj), fp, options) elif isinstance(obj, bool): _pack_boolean(obj, fp, options) elif isinstance(obj, int): _pack_integer(obj, fp, options) elif isinstance(obj, float): _pack_float(obj, fp, options) elif compatibility and isinstance(obj, str): _pack_oldspec_raw(obj.encode('utf-8'), fp, options) elif compatibility and isinstance(obj, bytes): _pack_oldspec_raw(obj, fp, options) elif isinstance(obj, str): _pack_string(obj, fp, options) elif isinstance(obj, bytes): _pack_binary(obj, fp, options) elif isinstance(obj, (list, tuple)): _pack_array(obj, fp, options) elif isinstance(obj, dict): _pack_map(obj, fp, options) elif isinstance(obj, datetime.datetime): _pack_ext_timestamp(obj, fp, options) elif isinstance(obj, Ext): _pack_ext(obj, fp, options) elif ext_handlers: # Linear search for superclass t = next((t for t in ext_handlers.keys() if isinstance(obj, t)), None) if t: _pack_ext(ext_handlers[t](obj), fp, options) else: raise UnsupportedTypeException( "unsupported type: %s" % str(type(obj))) else: raise UnsupportedTypeException( "unsupported type: %s" % str(type(obj)))
Serialize a Python object into MessagePack bytes. Args: obj: a Python object fp: a .write()-supporting file-like object Kwargs: ext_handlers (dict): dictionary of Ext handlers, mapping a custom type to a callable that packs an instance of the type into an Ext object force_float_precision (str): "single" to force packing floats as IEEE-754 single-precision floats, "double" to force packing floats as IEEE-754 double-precision floats. Returns: None. Raises: UnsupportedType(PackException): Object type not supported for packing. Example: >>> f = open('test.bin', 'wb') >>> umsgpack.pack({u"compact": True, u"schema": 0}, f) >>>
https://github.com/vsergeev/u-msgpack-python/blob/e290b768ce63177ae04ed96402915b75a9741f38/umsgpack.py#L486-L555
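The ext_handlers option described above can be sketched with the public packb/unpackb helpers; the Ext type code 0x10 and the decimal.Decimal mapping are arbitrary choices for illustration:

    import decimal
    import umsgpack

    packed = umsgpack.packb(
        {'price': decimal.Decimal('1.50')},
        ext_handlers={decimal.Decimal: lambda d: umsgpack.Ext(0x10, str(d).encode())},
    )
    unpacked = umsgpack.unpackb(
        packed,
        ext_handlers={0x10: lambda ext: decimal.Decimal(ext.data.decode())},
    )
    assert unpacked == {'price': decimal.Decimal('1.50')}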
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
_get_task_target
def _get_task_target(): """Get the default target for a pipeline task. Current version id format is: user_defined_version.minor_version_number Current module id is just the module's name. It could be "default" Returns: A complete target name of the format version.module. If module is the default module, just version. None if the target cannot be determined. """ # Break circular dependency. # pylint: disable=g-import-not-at-top import pipeline if pipeline._TEST_MODE: return None # Further protect against test cases that don't set env vars # properly. if ("CURRENT_VERSION_ID" not in os.environ or "CURRENT_MODULE_ID" not in os.environ): logging.warning("Running Pipeline in non TEST_MODE but important " "env vars are not set.") return None version = os.environ["CURRENT_VERSION_ID"].split(".")[0] module = os.environ["CURRENT_MODULE_ID"] return "%s.%s" % (version, module)
python
def _get_task_target(): """Get the default target for a pipeline task. Current version id format is: user_defined_version.minor_version_number Current module id is just the module's name. It could be "default" Returns: A complete target name of the format version.module. If module is the default module, just version. None if the target cannot be determined. """ # Break circular dependency. # pylint: disable=g-import-not-at-top import pipeline if pipeline._TEST_MODE: return None # Further protect against test cases that don't set env vars # properly. if ("CURRENT_VERSION_ID" not in os.environ or "CURRENT_MODULE_ID" not in os.environ): logging.warning("Running Pipeline in non TEST_MODE but important " "env vars are not set.") return None version = os.environ["CURRENT_VERSION_ID"].split(".")[0] module = os.environ["CURRENT_MODULE_ID"] return "%s.%s" % (version, module)
Get the default target for a pipeline task. Current version id format is: user_defined_version.minor_version_number Current module id is just the module's name. It could be "default" Returns: A complete target name of the format version.module. If module is the default module, just version. None if the target cannot be determined.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L40-L66
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
for_name
def for_name(fq_name, recursive=False): """Find class/function/method specified by its fully qualified name. Fully qualified can be specified as: * <module_name>.<class_name> * <module_name>.<function_name> * <module_name>.<class_name>.<method_name> (an unbound method will be returned in this case). for_name works by doing __import__ for <module_name>, and looks for <class_name>/<function_name> in module's __dict__/attrs. If fully qualified name doesn't contain '.', the current module will be used. Args: fq_name: fully qualified name of something to find Returns: class object. Raises: ImportError: when specified module could not be loaded or the class was not found in the module. """ fq_name = str(fq_name) module_name = __name__ short_name = fq_name if fq_name.rfind(".") >= 0: (module_name, short_name) = (fq_name[:fq_name.rfind(".")], fq_name[fq_name.rfind(".") + 1:]) try: result = __import__(module_name, None, None, [short_name]) return result.__dict__[short_name] except KeyError: # If we're recursively inside a for_name() chain, then we want to raise # this error as a key error so we can report the actual source of the # problem. If we're *not* recursively being called, that means the # module was found and the specific item could not be loaded, and thus # we want to raise an ImportError directly. if recursive: raise else: raise ImportError("Could not find '%s' on path '%s'" % ( short_name, module_name)) except ImportError, e: # module_name is not actually a module. Try for_name for it to figure # out what's this. try: module = for_name(module_name, recursive=True) if hasattr(module, short_name): return getattr(module, short_name) else: # The module was found, but the function component is missing. raise KeyError() except KeyError: raise ImportError("Could not find '%s' on path '%s'" % ( short_name, module_name)) except ImportError: # This means recursive import attempts failed, thus we will raise the # first ImportError we encountered, since it's likely the most accurate. pass # Raise the original import error that caused all of this, since it is # likely the real cause of the overall problem. raise
python
def for_name(fq_name, recursive=False): """Find class/function/method specified by its fully qualified name. Fully qualified can be specified as: * <module_name>.<class_name> * <module_name>.<function_name> * <module_name>.<class_name>.<method_name> (an unbound method will be returned in this case). for_name works by doing __import__ for <module_name>, and looks for <class_name>/<function_name> in module's __dict__/attrs. If fully qualified name doesn't contain '.', the current module will be used. Args: fq_name: fully qualified name of something to find Returns: class object. Raises: ImportError: when specified module could not be loaded or the class was not found in the module. """ fq_name = str(fq_name) module_name = __name__ short_name = fq_name if fq_name.rfind(".") >= 0: (module_name, short_name) = (fq_name[:fq_name.rfind(".")], fq_name[fq_name.rfind(".") + 1:]) try: result = __import__(module_name, None, None, [short_name]) return result.__dict__[short_name] except KeyError: # If we're recursively inside a for_name() chain, then we want to raise # this error as a key error so we can report the actual source of the # problem. If we're *not* recursively being called, that means the # module was found and the specific item could not be loaded, and thus # we want to raise an ImportError directly. if recursive: raise else: raise ImportError("Could not find '%s' on path '%s'" % ( short_name, module_name)) except ImportError, e: # module_name is not actually a module. Try for_name for it to figure # out what's this. try: module = for_name(module_name, recursive=True) if hasattr(module, short_name): return getattr(module, short_name) else: # The module was found, but the function component is missing. raise KeyError() except KeyError: raise ImportError("Could not find '%s' on path '%s'" % ( short_name, module_name)) except ImportError: # This means recursive import attempts failed, thus we will raise the # first ImportError we encountered, since it's likely the most accurate. pass # Raise the original import error that caused all of this, since it is # likely the real cause of the overall problem. raise
Find class/function/method specified by its fully qualified name. Fully qualified can be specified as: * <module_name>.<class_name> * <module_name>.<function_name> * <module_name>.<class_name>.<method_name> (an unbound method will be returned in this case). for_name works by doing __import__ for <module_name>, and looks for <class_name>/<function_name> in module's __dict__/attrs. If fully qualified name doesn't contain '.', the current module will be used. Args: fq_name: fully qualified name of something to find Returns: class object. Raises: ImportError: when specified module could not be loaded or the class was not found in the module.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L69-L133
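As a rough illustration of the lookup strategy described above, the following standalone sketch resolves a dotted name with __import__ and getattr. The resolve name and the os.path.join example are purely illustrative and are not part of the pipeline library.

# Illustrative sketch of dotted-name resolution (not the library's code).
def resolve(fq_name):
  # Split "pkg.module.attr" into "pkg.module" and "attr".
  module_name, _, short_name = fq_name.rpartition(".")
  # A non-empty fromlist makes __import__ return the leaf module.
  module = __import__(module_name, None, None, [short_name])
  return getattr(module, short_name)

join = resolve("os.path.join")
print(join("a", "b"))  # "a/b" on POSIX systems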
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
is_generator_function
def is_generator_function(obj):
  """Return true if the object is a user-defined generator function.

  Generator function objects provide the same attributes as functions.
  See isfunction.__doc__ for the attributes listing.

  Adapted from Python 2.6.

  Args:
    obj: an object to test.

  Returns:
    true if the object is a generator function.
  """
  CO_GENERATOR = 0x20
  return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
               obj.func_code.co_flags & CO_GENERATOR))
python
def is_generator_function(obj):
  """Return true if the object is a user-defined generator function.

  Generator function objects provide the same attributes as functions.
  See isfunction.__doc__ for the attributes listing.

  Adapted from Python 2.6.

  Args:
    obj: an object to test.

  Returns:
    true if the object is a generator function.
  """
  CO_GENERATOR = 0x20
  return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
               obj.func_code.co_flags & CO_GENERATOR))
Return true if the object is a user-defined generator function. Generator function objects provide the same attributes as functions. See isfunction.__doc__ for the attributes listing. Adapted from Python 2.6. Args: obj: an object to test. Returns: true if the object is a generator function.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L136-L152
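A small self-contained check of the CO_GENERATOR flag described above; the countdown function is made up for the demonstration, and the func_code/__code__ fallback covers both Python 2 and Python 3.

import inspect

def countdown(n):
  while n > 0:
    yield n
    n -= 1

CO_GENERATOR = 0x20
# Python 2 exposes the code object as func_code, Python 3 as __code__.
code = getattr(countdown, "func_code", None) or countdown.__code__
print(bool(code.co_flags & CO_GENERATOR))      # True
print(inspect.isgeneratorfunction(countdown))  # True (stdlib equivalent)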
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
_register_json_primitive
def _register_json_primitive(object_type, encoder, decoder): """Extend what Pipeline can serialize. Args: object_type: type of the object. encoder: a function that takes in an object and returns a dict of json primitives. decoder: inverse function of encoder. """ global _TYPE_TO_ENCODER global _TYPE_NAME_TO_DECODER if object_type not in _TYPE_TO_ENCODER: _TYPE_TO_ENCODER[object_type] = encoder _TYPE_NAME_TO_DECODER[object_type.__name__] = decoder
python
def _register_json_primitive(object_type, encoder, decoder): """Extend what Pipeline can serialize. Args: object_type: type of the object. encoder: a function that takes in an object and returns a dict of json primitives. decoder: inverse function of encoder. """ global _TYPE_TO_ENCODER global _TYPE_NAME_TO_DECODER if object_type not in _TYPE_TO_ENCODER: _TYPE_TO_ENCODER[object_type] = encoder _TYPE_NAME_TO_DECODER[object_type.__name__] = decoder
Extend what Pipeline can serialize. Args: object_type: type of the object. encoder: a function that takes in an object and returns a dict of json primitives. decoder: inverse function of encoder.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L211-L224
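The registry pattern can be exercised outside App Engine with plain dictionaries. The names below (register and the datetime.date encoder/decoder pair) are assumptions made for the example, not registrations the library actually performs.

import datetime

_TYPE_TO_ENCODER = {}
_TYPE_NAME_TO_DECODER = {}

def register(object_type, encoder, decoder):
  # First registration wins, mirroring the "if not already present" check above.
  if object_type not in _TYPE_TO_ENCODER:
    _TYPE_TO_ENCODER[object_type] = encoder
    _TYPE_NAME_TO_DECODER[object_type.__name__] = decoder

register(
    datetime.date,
    lambda d: {"iso": d.isoformat()},
    lambda raw: datetime.date(*[int(p) for p in raw["iso"].split("-")]))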
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
_JsonDecodeKey
def _JsonDecodeKey(d): """Json decode a ndb.Key object.""" k_c = d['key_string'] if isinstance(k_c, (list, tuple)): return ndb.Key(flat=k_c) return ndb.Key(urlsafe=d['key_string'])
python
def _JsonDecodeKey(d): """Json decode a ndb.Key object.""" k_c = d['key_string'] if isinstance(k_c, (list, tuple)): return ndb.Key(flat=k_c) return ndb.Key(urlsafe=d['key_string'])
JSON-decode an ndb.Key object.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L238-L243
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
JsonEncoder.default
def default(self, o): """Inherit docs.""" if type(o) in _TYPE_TO_ENCODER: encoder = _TYPE_TO_ENCODER[type(o)] json_struct = encoder(o) json_struct[self.TYPE_ID] = type(o).__name__ return json_struct return super(JsonEncoder, self).default(o)
python
def default(self, o): """Inherit docs.""" if type(o) in _TYPE_TO_ENCODER: encoder = _TYPE_TO_ENCODER[type(o)] json_struct = encoder(o) json_struct[self.TYPE_ID] = type(o).__name__ return json_struct return super(JsonEncoder, self).default(o)
Inherit docs.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L160-L167
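The same tagged-encoding idea works with the standard json module; TaggedEncoder and its "date" handling below are illustrative stand-ins for the library's JsonEncoder and its type registry.

import datetime
import json

class TaggedEncoder(json.JSONEncoder):
  TYPE_ID = "__pipeline_type__"

  def default(self, o):
    # Tag values json cannot serialize natively with their type name.
    if isinstance(o, datetime.date):
      return {self.TYPE_ID: "date", "iso": o.isoformat()}
    return super(TaggedEncoder, self).default(o)

print(json.dumps({"when": datetime.date(2014, 1, 2)}, cls=TaggedEncoder))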
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/util.py
JsonDecoder._dict_to_obj
def _dict_to_obj(self, d):
  """Converts a JSON object dictionary to a Python object."""
  if JsonEncoder.TYPE_ID not in d:
    return d
  type_name = d.pop(JsonEncoder.TYPE_ID)
  if type_name in _TYPE_NAME_TO_DECODER:
    decoder = _TYPE_NAME_TO_DECODER[type_name]
    return decoder(d)
  else:
    raise TypeError("Invalid type %s." % type_name)
python
def _dict_to_obj(self, d):
  """Converts a JSON object dictionary to a Python object."""
  if JsonEncoder.TYPE_ID not in d:
    return d
  type_name = d.pop(JsonEncoder.TYPE_ID)
  if type_name in _TYPE_NAME_TO_DECODER:
    decoder = _TYPE_NAME_TO_DECODER[type_name]
    return decoder(d)
  else:
    raise TypeError("Invalid type %s." % type_name)
Converts a JSON object dictionary to a Python object.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/util.py#L178-L188
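And the matching decode side, using json.loads with an object_hook instead of the library's JsonDecoder; the tag name and the "date" type are the same illustrative assumptions as in the encoder sketch above.

import datetime
import json

TYPE_ID = "__pipeline_type__"

def dict_to_obj(d):
  # Plain dictionaries pass through untouched; tagged ones are decoded.
  if d.get(TYPE_ID) == "date":
    return datetime.date(*[int(p) for p in d["iso"].split("-")])
  return d

encoded = '{"when": {"__pipeline_type__": "date", "iso": "2014-01-02"}}'
print(json.loads(encoded, object_hook=dict_to_obj))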
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_short_repr
def _short_repr(obj): """Helper function returns a truncated repr() of an object.""" stringified = pprint.saferepr(obj) if len(stringified) > 200: return '%s... (%d bytes)' % (stringified[:200], len(stringified)) return stringified
python
def _short_repr(obj): """Helper function returns a truncated repr() of an object.""" stringified = pprint.saferepr(obj) if len(stringified) > 200: return '%s... (%d bytes)' % (stringified[:200], len(stringified)) return stringified
Helper function returns a truncated repr() of an object.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1233-L1238
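The truncation behaviour is easy to see with an ordinary large object; the 200-character threshold below is taken from the function itself.

import pprint

big = list(range(500))
stringified = pprint.saferepr(big)
if len(stringified) > 200:
  # Mirrors _short_repr: keep the first 200 characters plus a size note.
  print('%s... (%d bytes)' % (stringified[:200], len(stringified)))
else:
  print(stringified)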
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_write_json_blob
def _write_json_blob(encoded_value, pipeline_id=None): """Writes a JSON encoded value to a Cloud Storage File. This function will store the blob in a GCS file in the default bucket under the appengine_pipeline directory. Optionally using another directory level specified by pipeline_id Args: encoded_value: The encoded JSON string. pipeline_id: A pipeline id to segment files in Cloud Storage, if none, the file will be created under appengine_pipeline Returns: The blobstore.BlobKey for the file that was created. """ default_bucket = app_identity.get_default_gcs_bucket_name() if default_bucket is None: raise Exception( "No default cloud storage bucket has been set for this application. " "This app was likely created before v1.9.0, please see: " "https://cloud.google.com/appengine/docs/php/googlestorage/setup") path_components = ['/', default_bucket, "appengine_pipeline"] if pipeline_id: path_components.append(pipeline_id) path_components.append(uuid.uuid4().hex) # Use posixpath to get a / even if we're running on windows somehow file_name = posixpath.join(*path_components) with cloudstorage.open(file_name, 'w', content_type='application/json') as f: for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE): end_index = start_index + _MAX_JSON_SIZE f.write(encoded_value[start_index:end_index]) key_str = blobstore.create_gs_key("/gs" + file_name) logging.debug("Created blob for filename = %s gs_key = %s", file_name, key_str) return blobstore.BlobKey(key_str)
python
def _write_json_blob(encoded_value, pipeline_id=None): """Writes a JSON encoded value to a Cloud Storage File. This function will store the blob in a GCS file in the default bucket under the appengine_pipeline directory. Optionally using another directory level specified by pipeline_id Args: encoded_value: The encoded JSON string. pipeline_id: A pipeline id to segment files in Cloud Storage, if none, the file will be created under appengine_pipeline Returns: The blobstore.BlobKey for the file that was created. """ default_bucket = app_identity.get_default_gcs_bucket_name() if default_bucket is None: raise Exception( "No default cloud storage bucket has been set for this application. " "This app was likely created before v1.9.0, please see: " "https://cloud.google.com/appengine/docs/php/googlestorage/setup") path_components = ['/', default_bucket, "appengine_pipeline"] if pipeline_id: path_components.append(pipeline_id) path_components.append(uuid.uuid4().hex) # Use posixpath to get a / even if we're running on windows somehow file_name = posixpath.join(*path_components) with cloudstorage.open(file_name, 'w', content_type='application/json') as f: for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE): end_index = start_index + _MAX_JSON_SIZE f.write(encoded_value[start_index:end_index]) key_str = blobstore.create_gs_key("/gs" + file_name) logging.debug("Created blob for filename = %s gs_key = %s", file_name, key_str) return blobstore.BlobKey(key_str)
Writes a JSON encoded value to a Cloud Storage file. This function will store the blob in a GCS file in the default bucket under the appengine_pipeline directory, optionally adding another directory level specified by pipeline_id. Args: encoded_value: The encoded JSON string. pipeline_id: A pipeline id used to segment files in Cloud Storage; if None, the file will be created directly under appengine_pipeline. Returns: The blobstore.BlobKey for the file that was created.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1241-L1276
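Outside App Engine the interesting part is the chunked write loop. The sketch below replaces the cloudstorage handle with any file-like object; the 900000-byte chunk size is an assumption standing in for _MAX_JSON_SIZE, whose real value is defined elsewhere in pipeline.py.

import io

CHUNK = 900000  # stands in for _MAX_JSON_SIZE; the actual constant lives in pipeline.py

def write_chunked(f, encoded_value, chunk=CHUNK):
  # Write the encoded JSON in fixed-size slices, as the GCS loop above does.
  for start in range(0, len(encoded_value), chunk):
    f.write(encoded_value[start:start + chunk])

buf = io.StringIO()
write_chunked(buf, u'{"key": "value"}' * 100000)
print(len(buf.getvalue()))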
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_dereference_args
def _dereference_args(pipeline_name, args, kwargs): """Dereference a Pipeline's arguments that are slots, validating them. Each argument value passed in is assumed to be a dictionary with the format: {'type': 'value', 'value': 'serializable'} # A resolved value. {'type': 'slot', 'slot_key': 'str() on a db.Key'} # A pending Slot. Args: pipeline_name: The name of the pipeline class; used for debugging. args: Iterable of positional arguments. kwargs: Dictionary of keyword arguments. Returns: Tuple (args, kwargs) where: Args: A list of positional arguments values that are all dereferenced. Kwargs: A list of keyword arguments values that are all dereferenced. Raises: SlotNotFilledError if any of the supplied 'slot_key' records are not present in the Datastore or have not yet been filled. UnexpectedPipelineError if an unknown parameter type was passed. """ lookup_slots = set() for arg in itertools.chain(args, kwargs.itervalues()): if arg['type'] == 'slot': lookup_slots.add(db.Key(arg['slot_key'])) slot_dict = {} for key, slot_record in zip(lookup_slots, db.get(lookup_slots)): if slot_record is None or slot_record.status != _SlotRecord.FILLED: raise SlotNotFilledError( 'Slot "%s" missing its value. From %s(*args=%s, **kwargs=%s)' % (key, pipeline_name, _short_repr(args), _short_repr(kwargs))) slot_dict[key] = slot_record.value arg_list = [] for current_arg in args: if current_arg['type'] == 'slot': arg_list.append(slot_dict[db.Key(current_arg['slot_key'])]) elif current_arg['type'] == 'value': arg_list.append(current_arg['value']) else: raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg) kwarg_dict = {} for key, current_arg in kwargs.iteritems(): if current_arg['type'] == 'slot': kwarg_dict[key] = slot_dict[db.Key(current_arg['slot_key'])] elif current_arg['type'] == 'value': kwarg_dict[key] = current_arg['value'] else: raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg) return (arg_list, kwarg_dict)
python
def _dereference_args(pipeline_name, args, kwargs): """Dereference a Pipeline's arguments that are slots, validating them. Each argument value passed in is assumed to be a dictionary with the format: {'type': 'value', 'value': 'serializable'} # A resolved value. {'type': 'slot', 'slot_key': 'str() on a db.Key'} # A pending Slot. Args: pipeline_name: The name of the pipeline class; used for debugging. args: Iterable of positional arguments. kwargs: Dictionary of keyword arguments. Returns: Tuple (args, kwargs) where: Args: A list of positional arguments values that are all dereferenced. Kwargs: A list of keyword arguments values that are all dereferenced. Raises: SlotNotFilledError if any of the supplied 'slot_key' records are not present in the Datastore or have not yet been filled. UnexpectedPipelineError if an unknown parameter type was passed. """ lookup_slots = set() for arg in itertools.chain(args, kwargs.itervalues()): if arg['type'] == 'slot': lookup_slots.add(db.Key(arg['slot_key'])) slot_dict = {} for key, slot_record in zip(lookup_slots, db.get(lookup_slots)): if slot_record is None or slot_record.status != _SlotRecord.FILLED: raise SlotNotFilledError( 'Slot "%s" missing its value. From %s(*args=%s, **kwargs=%s)' % (key, pipeline_name, _short_repr(args), _short_repr(kwargs))) slot_dict[key] = slot_record.value arg_list = [] for current_arg in args: if current_arg['type'] == 'slot': arg_list.append(slot_dict[db.Key(current_arg['slot_key'])]) elif current_arg['type'] == 'value': arg_list.append(current_arg['value']) else: raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg) kwarg_dict = {} for key, current_arg in kwargs.iteritems(): if current_arg['type'] == 'slot': kwarg_dict[key] = slot_dict[db.Key(current_arg['slot_key'])] elif current_arg['type'] == 'value': kwarg_dict[key] = current_arg['value'] else: raise UnexpectedPipelineError('Unknown parameter type: %r' % current_arg) return (arg_list, kwarg_dict)
Dereference a Pipeline's arguments that are slots, validating them. Each argument value passed in is assumed to be a dictionary with the format: {'type': 'value', 'value': 'serializable'} # A resolved value. {'type': 'slot', 'slot_key': 'str() on a db.Key'} # A pending Slot. Args: pipeline_name: The name of the pipeline class; used for debugging. args: Iterable of positional arguments. kwargs: Dictionary of keyword arguments. Returns: Tuple (args, kwargs) where: Args: A list of positional argument values, all dereferenced. Kwargs: A dictionary of keyword argument values, all dereferenced. Raises: SlotNotFilledError if any of the supplied 'slot_key' records are not present in the Datastore or have not yet been filled. UnexpectedPipelineError if an unknown parameter type was passed.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1279-L1332
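The two wire formats for arguments ('value' versus 'slot') can be exercised without the Datastore; the in-memory filled_slots dict below stands in for the _SlotRecord lookups.

filled_slots = {"slot-123": "hello"}  # stands in for filled _SlotRecords

args = [
    {"type": "value", "value": 42},
    {"type": "slot", "slot_key": "slot-123"},
]

resolved = []
for arg in args:
  if arg["type"] == "slot":
    resolved.append(filled_slots[arg["slot_key"]])
  else:
    resolved.append(arg["value"])

print(resolved)  # [42, 'hello']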
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_generate_args
def _generate_args(pipeline, future, queue_name, base_path): """Generate the params used to describe a Pipeline's depedencies. The arguments passed to this method may be normal values, Slot instances (for named outputs), or PipelineFuture instances (for referring to the default output slot). Args: pipeline: The Pipeline instance to generate args for. future: The PipelineFuture for the Pipeline these arguments correspond to. queue_name: The queue to run the pipeline on. base_path: Relative URL for pipeline URL handlers. Returns: Tuple (dependent_slots, output_slot_keys, params_text, params_blob) where: dependent_slots: List of db.Key instances of _SlotRecords on which this pipeline will need to block before execution (passed to create a _BarrierRecord for running the pipeline). output_slot_keys: List of db.Key instances of _SlotRecords that will be filled by this pipeline during its execution (passed to create a _BarrierRecord for finalizing the pipeline). params_text: JSON dictionary of pipeline parameters to be serialized and saved in a corresponding _PipelineRecord. Will be None if the params are too big and must be saved in a blob instead. params_blob: JSON dictionary of pipeline parameters to be serialized and saved in a Blob file, and then attached to a _PipelineRecord. Will be None if the params data size was small enough to fit in the entity. """ params = { 'args': [], 'kwargs': {}, 'after_all': [], 'output_slots': {}, 'class_path': pipeline._class_path, 'queue_name': queue_name, 'base_path': base_path, 'backoff_seconds': pipeline.backoff_seconds, 'backoff_factor': pipeline.backoff_factor, 'max_attempts': pipeline.max_attempts, 'task_retry': pipeline.task_retry, 'target': pipeline.target, } dependent_slots = set() arg_list = params['args'] for current_arg in pipeline.args: if isinstance(current_arg, PipelineFuture): current_arg = current_arg.default if isinstance(current_arg, Slot): arg_list.append({'type': 'slot', 'slot_key': str(current_arg.key)}) dependent_slots.add(current_arg.key) else: arg_list.append({'type': 'value', 'value': current_arg}) kwarg_dict = params['kwargs'] for name, current_arg in pipeline.kwargs.iteritems(): if isinstance(current_arg, PipelineFuture): current_arg = current_arg.default if isinstance(current_arg, Slot): kwarg_dict[name] = {'type': 'slot', 'slot_key': str(current_arg.key)} dependent_slots.add(current_arg.key) else: kwarg_dict[name] = {'type': 'value', 'value': current_arg} after_all = params['after_all'] for other_future in future._after_all_pipelines: slot_key = other_future._output_dict['default'].key after_all.append(str(slot_key)) dependent_slots.add(slot_key) output_slots = params['output_slots'] output_slot_keys = set() for name, slot in future._output_dict.iteritems(): output_slot_keys.add(slot.key) output_slots[name] = str(slot.key) params_encoded = json.dumps(params, cls=mr_util.JsonEncoder) params_text = None params_blob = None if len(params_encoded) > _MAX_JSON_SIZE: params_blob = _write_json_blob(params_encoded, pipeline.pipeline_id) else: params_text = params_encoded return dependent_slots, output_slot_keys, params_text, params_blob
python
def _generate_args(pipeline, future, queue_name, base_path): """Generate the params used to describe a Pipeline's depedencies. The arguments passed to this method may be normal values, Slot instances (for named outputs), or PipelineFuture instances (for referring to the default output slot). Args: pipeline: The Pipeline instance to generate args for. future: The PipelineFuture for the Pipeline these arguments correspond to. queue_name: The queue to run the pipeline on. base_path: Relative URL for pipeline URL handlers. Returns: Tuple (dependent_slots, output_slot_keys, params_text, params_blob) where: dependent_slots: List of db.Key instances of _SlotRecords on which this pipeline will need to block before execution (passed to create a _BarrierRecord for running the pipeline). output_slot_keys: List of db.Key instances of _SlotRecords that will be filled by this pipeline during its execution (passed to create a _BarrierRecord for finalizing the pipeline). params_text: JSON dictionary of pipeline parameters to be serialized and saved in a corresponding _PipelineRecord. Will be None if the params are too big and must be saved in a blob instead. params_blob: JSON dictionary of pipeline parameters to be serialized and saved in a Blob file, and then attached to a _PipelineRecord. Will be None if the params data size was small enough to fit in the entity. """ params = { 'args': [], 'kwargs': {}, 'after_all': [], 'output_slots': {}, 'class_path': pipeline._class_path, 'queue_name': queue_name, 'base_path': base_path, 'backoff_seconds': pipeline.backoff_seconds, 'backoff_factor': pipeline.backoff_factor, 'max_attempts': pipeline.max_attempts, 'task_retry': pipeline.task_retry, 'target': pipeline.target, } dependent_slots = set() arg_list = params['args'] for current_arg in pipeline.args: if isinstance(current_arg, PipelineFuture): current_arg = current_arg.default if isinstance(current_arg, Slot): arg_list.append({'type': 'slot', 'slot_key': str(current_arg.key)}) dependent_slots.add(current_arg.key) else: arg_list.append({'type': 'value', 'value': current_arg}) kwarg_dict = params['kwargs'] for name, current_arg in pipeline.kwargs.iteritems(): if isinstance(current_arg, PipelineFuture): current_arg = current_arg.default if isinstance(current_arg, Slot): kwarg_dict[name] = {'type': 'slot', 'slot_key': str(current_arg.key)} dependent_slots.add(current_arg.key) else: kwarg_dict[name] = {'type': 'value', 'value': current_arg} after_all = params['after_all'] for other_future in future._after_all_pipelines: slot_key = other_future._output_dict['default'].key after_all.append(str(slot_key)) dependent_slots.add(slot_key) output_slots = params['output_slots'] output_slot_keys = set() for name, slot in future._output_dict.iteritems(): output_slot_keys.add(slot.key) output_slots[name] = str(slot.key) params_encoded = json.dumps(params, cls=mr_util.JsonEncoder) params_text = None params_blob = None if len(params_encoded) > _MAX_JSON_SIZE: params_blob = _write_json_blob(params_encoded, pipeline.pipeline_id) else: params_text = params_encoded return dependent_slots, output_slot_keys, params_text, params_blob
Generate the params used to describe a Pipeline's dependencies. The arguments passed to this method may be normal values, Slot instances (for named outputs), or PipelineFuture instances (for referring to the default output slot). Args: pipeline: The Pipeline instance to generate args for. future: The PipelineFuture for the Pipeline these arguments correspond to. queue_name: The queue to run the pipeline on. base_path: Relative URL for pipeline URL handlers. Returns: Tuple (dependent_slots, output_slot_keys, params_text, params_blob) where: dependent_slots: List of db.Key instances of _SlotRecords on which this pipeline will need to block before execution (passed to create a _BarrierRecord for running the pipeline). output_slot_keys: List of db.Key instances of _SlotRecords that will be filled by this pipeline during its execution (passed to create a _BarrierRecord for finalizing the pipeline). params_text: JSON dictionary of pipeline parameters to be serialized and saved in a corresponding _PipelineRecord. Will be None if the params are too big and must be saved in a blob instead. params_blob: JSON dictionary of pipeline parameters to be serialized and saved in a Blob file, and then attached to a _PipelineRecord. Will be None if the params data size was small enough to fit in the entity.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1335-L1419
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_get_timestamp_ms
def _get_timestamp_ms(when): """Converts a datetime.datetime to integer milliseconds since the epoch. Requires special handling to preserve microseconds. Args: when: A datetime.datetime instance. Returns: Integer time since the epoch in milliseconds. If the supplied 'when' is None, the return value will be None. """ if when is None: return None ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0) ms_since_epoch += when.microsecond / 1000.0 return int(ms_since_epoch)
python
def _get_timestamp_ms(when): """Converts a datetime.datetime to integer milliseconds since the epoch. Requires special handling to preserve microseconds. Args: when: A datetime.datetime instance. Returns: Integer time since the epoch in milliseconds. If the supplied 'when' is None, the return value will be None. """ if when is None: return None ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0) ms_since_epoch += when.microsecond / 1000.0 return int(ms_since_epoch)
Converts a datetime.datetime to integer milliseconds since the epoch. Requires special handling to preserve microseconds. Args: when: A datetime.datetime instance. Returns: Integer time since the epoch in milliseconds. If the supplied 'when' is None, the return value will be None.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2872-L2888
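A quick local check of the millisecond conversion; the helper is reproduced here so it can be run without App Engine. Note that time.mktime interprets the utctimetuple as local time, so the absolute value depends on the machine's timezone; the calls below simply demonstrate the shape of the result.

import datetime
import time

def get_timestamp_ms(when):
  if when is None:
    return None
  ms_since_epoch = float(time.mktime(when.utctimetuple()) * 1000.0)
  ms_since_epoch += when.microsecond / 1000.0
  return int(ms_since_epoch)

print(get_timestamp_ms(datetime.datetime(2014, 1, 2, 3, 4, 5, 678000)))
print(get_timestamp_ms(None))  # None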
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_get_internal_status
def _get_internal_status(pipeline_key=None, pipeline_dict=None, slot_dict=None, barrier_dict=None, status_dict=None): """Gets the UI dictionary of a pipeline from a set of status dictionaries. Args: pipeline_key: The key of the pipeline to lookup. pipeline_dict: Dictionary mapping pipeline db.Key to _PipelineRecord. Default is an empty dictionary. slot_dict: Dictionary mapping slot db.Key to _SlotRecord. Default is an empty dictionary. barrier_dict: Dictionary mapping barrier db.Key to _BarrierRecord. Default is an empty dictionary. status_dict: Dictionary mapping status record db.Key to _StatusRecord. Default is an empty dictionary. Returns: Dictionary with the keys: classPath: The pipeline function being run. args: List of positional argument slot dictionaries. kwargs: Dictionary of keyword argument slot dictionaries. outputs: Dictionary of output slot dictionaries. children: List of child pipeline IDs. queueName: Queue on which this pipeline is running. afterSlotKeys: List of Slot Ids after which this pipeline runs. currentAttempt: Number of the current attempt, starting at 1. maxAttempts: Maximum number of attempts before aborting. backoffSeconds: Constant factor for backoff before retrying. backoffFactor: Exponential factor for backoff before retrying. status: Current status of the pipeline. startTimeMs: When this pipeline ran or will run due to retries, if present. endTimeMs: When this pipeline finalized, if present. lastRetryMessage: Why the pipeline failed during the last retry, if there was a failure; may be empty. abortMessage: For root pipelines, why the pipeline was aborted if it was aborted; may be empty. Dictionary will contain these keys if explicit status is set: statusTimeMs: When the status was set as milliseconds since the epoch. statusMessage: Status message, if present. statusConsoleUrl: The relative URL for the console of this pipeline. statusLinks: Dictionary mapping human-readable names to relative URLs for related URLs to this pipeline. Raises: PipelineStatusError if any input is bad. 
""" if pipeline_dict is None: pipeline_dict = {} if slot_dict is None: slot_dict = {} if barrier_dict is None: barrier_dict = {} if status_dict is None: status_dict = {} pipeline_record = pipeline_dict.get(pipeline_key) if pipeline_record is None: raise PipelineStatusError( 'Could not find pipeline ID "%s"' % pipeline_key.name()) params = pipeline_record.params root_pipeline_key = \ _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record) default_slot_key = db.Key(params['output_slots']['default']) start_barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.START, parent=pipeline_key) finalize_barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key) status_record_key = db.Key.from_path( _StatusRecord.kind(), pipeline_key.name()) start_barrier = barrier_dict.get(start_barrier_key) finalize_barrier = barrier_dict.get(finalize_barrier_key) default_slot = slot_dict.get(default_slot_key) status_record = status_dict.get(status_record_key) if finalize_barrier is None: raise PipelineStatusError( 'Finalization barrier missing for pipeline ID "%s"' % pipeline_key.name()) if default_slot is None: raise PipelineStatusError( 'Default output slot with key=%s missing for pipeline ID "%s"' % ( default_slot_key, pipeline_key.name())) output = { 'classPath': pipeline_record.class_path, 'args': list(params['args']), 'kwargs': params['kwargs'].copy(), 'outputs': params['output_slots'].copy(), 'children': [key.name() for key in pipeline_record.fanned_out], 'queueName': params['queue_name'], 'afterSlotKeys': [str(key) for key in params['after_all']], 'currentAttempt': pipeline_record.current_attempt + 1, 'maxAttempts': pipeline_record.max_attempts, 'backoffSeconds': pipeline_record.params['backoff_seconds'], 'backoffFactor': pipeline_record.params['backoff_factor'], } # TODO(user): Truncate args, kwargs, and outputs to < 1MB each so we # can reasonably return the whole tree of pipelines and their outputs. # Coerce each value to a string to truncate if necessary. For now if the # params are too big it will just cause the whole status page to break. # Fix the key names in parameters to match JavaScript style. for value_dict in itertools.chain( output['args'], output['kwargs'].itervalues()): if 'slot_key' in value_dict: value_dict['slotKey'] = value_dict.pop('slot_key') # Figure out the pipeline's status. if pipeline_record.status in (_PipelineRecord.WAITING, _PipelineRecord.RUN): if default_slot.status == _SlotRecord.FILLED: status = 'finalizing' elif (pipeline_record.status == _PipelineRecord.WAITING and pipeline_record.next_retry_time is not None): status = 'retry' elif start_barrier and start_barrier.status == _BarrierRecord.WAITING: # start_barrier will be missing for root pipelines status = 'waiting' else: status = 'run' elif pipeline_record.status == _PipelineRecord.DONE: status = 'done' elif pipeline_record.status == _PipelineRecord.ABORTED: status = 'aborted' output['status'] = status if status_record: output['statusTimeMs'] = _get_timestamp_ms(status_record.status_time) if status_record.message: output['statusMessage'] = status_record.message if status_record.console_url: output['statusConsoleUrl'] = status_record.console_url if status_record.link_names: output['statusLinks'] = dict( zip(status_record.link_names, status_record.link_urls)) # Populate status-depenedent fields. 
if status in ('run', 'finalizing', 'done', 'retry'): if pipeline_record.next_retry_time is not None: output['startTimeMs'] = _get_timestamp_ms(pipeline_record.next_retry_time) elif start_barrier: # start_barrier will be missing for root pipelines output['startTimeMs'] = _get_timestamp_ms(start_barrier.trigger_time) elif pipeline_record.start_time: # Assume this pipeline ran immediately upon spawning with no # start barrier or it's the root pipeline. output['startTimeMs'] = _get_timestamp_ms(pipeline_record.start_time) if status in ('finalizing',): output['endTimeMs'] = _get_timestamp_ms(default_slot.fill_time) if status in ('done',): output['endTimeMs'] = _get_timestamp_ms(pipeline_record.finalized_time) if pipeline_record.next_retry_time is not None: output['lastRetryMessage'] = pipeline_record.retry_message if pipeline_record.abort_message: output['abortMessage'] = pipeline_record.abort_message return output
python
def _get_internal_status(pipeline_key=None, pipeline_dict=None, slot_dict=None, barrier_dict=None, status_dict=None): """Gets the UI dictionary of a pipeline from a set of status dictionaries. Args: pipeline_key: The key of the pipeline to lookup. pipeline_dict: Dictionary mapping pipeline db.Key to _PipelineRecord. Default is an empty dictionary. slot_dict: Dictionary mapping slot db.Key to _SlotRecord. Default is an empty dictionary. barrier_dict: Dictionary mapping barrier db.Key to _BarrierRecord. Default is an empty dictionary. status_dict: Dictionary mapping status record db.Key to _StatusRecord. Default is an empty dictionary. Returns: Dictionary with the keys: classPath: The pipeline function being run. args: List of positional argument slot dictionaries. kwargs: Dictionary of keyword argument slot dictionaries. outputs: Dictionary of output slot dictionaries. children: List of child pipeline IDs. queueName: Queue on which this pipeline is running. afterSlotKeys: List of Slot Ids after which this pipeline runs. currentAttempt: Number of the current attempt, starting at 1. maxAttempts: Maximum number of attempts before aborting. backoffSeconds: Constant factor for backoff before retrying. backoffFactor: Exponential factor for backoff before retrying. status: Current status of the pipeline. startTimeMs: When this pipeline ran or will run due to retries, if present. endTimeMs: When this pipeline finalized, if present. lastRetryMessage: Why the pipeline failed during the last retry, if there was a failure; may be empty. abortMessage: For root pipelines, why the pipeline was aborted if it was aborted; may be empty. Dictionary will contain these keys if explicit status is set: statusTimeMs: When the status was set as milliseconds since the epoch. statusMessage: Status message, if present. statusConsoleUrl: The relative URL for the console of this pipeline. statusLinks: Dictionary mapping human-readable names to relative URLs for related URLs to this pipeline. Raises: PipelineStatusError if any input is bad. 
""" if pipeline_dict is None: pipeline_dict = {} if slot_dict is None: slot_dict = {} if barrier_dict is None: barrier_dict = {} if status_dict is None: status_dict = {} pipeline_record = pipeline_dict.get(pipeline_key) if pipeline_record is None: raise PipelineStatusError( 'Could not find pipeline ID "%s"' % pipeline_key.name()) params = pipeline_record.params root_pipeline_key = \ _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record) default_slot_key = db.Key(params['output_slots']['default']) start_barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.START, parent=pipeline_key) finalize_barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key) status_record_key = db.Key.from_path( _StatusRecord.kind(), pipeline_key.name()) start_barrier = barrier_dict.get(start_barrier_key) finalize_barrier = barrier_dict.get(finalize_barrier_key) default_slot = slot_dict.get(default_slot_key) status_record = status_dict.get(status_record_key) if finalize_barrier is None: raise PipelineStatusError( 'Finalization barrier missing for pipeline ID "%s"' % pipeline_key.name()) if default_slot is None: raise PipelineStatusError( 'Default output slot with key=%s missing for pipeline ID "%s"' % ( default_slot_key, pipeline_key.name())) output = { 'classPath': pipeline_record.class_path, 'args': list(params['args']), 'kwargs': params['kwargs'].copy(), 'outputs': params['output_slots'].copy(), 'children': [key.name() for key in pipeline_record.fanned_out], 'queueName': params['queue_name'], 'afterSlotKeys': [str(key) for key in params['after_all']], 'currentAttempt': pipeline_record.current_attempt + 1, 'maxAttempts': pipeline_record.max_attempts, 'backoffSeconds': pipeline_record.params['backoff_seconds'], 'backoffFactor': pipeline_record.params['backoff_factor'], } # TODO(user): Truncate args, kwargs, and outputs to < 1MB each so we # can reasonably return the whole tree of pipelines and their outputs. # Coerce each value to a string to truncate if necessary. For now if the # params are too big it will just cause the whole status page to break. # Fix the key names in parameters to match JavaScript style. for value_dict in itertools.chain( output['args'], output['kwargs'].itervalues()): if 'slot_key' in value_dict: value_dict['slotKey'] = value_dict.pop('slot_key') # Figure out the pipeline's status. if pipeline_record.status in (_PipelineRecord.WAITING, _PipelineRecord.RUN): if default_slot.status == _SlotRecord.FILLED: status = 'finalizing' elif (pipeline_record.status == _PipelineRecord.WAITING and pipeline_record.next_retry_time is not None): status = 'retry' elif start_barrier and start_barrier.status == _BarrierRecord.WAITING: # start_barrier will be missing for root pipelines status = 'waiting' else: status = 'run' elif pipeline_record.status == _PipelineRecord.DONE: status = 'done' elif pipeline_record.status == _PipelineRecord.ABORTED: status = 'aborted' output['status'] = status if status_record: output['statusTimeMs'] = _get_timestamp_ms(status_record.status_time) if status_record.message: output['statusMessage'] = status_record.message if status_record.console_url: output['statusConsoleUrl'] = status_record.console_url if status_record.link_names: output['statusLinks'] = dict( zip(status_record.link_names, status_record.link_urls)) # Populate status-depenedent fields. 
if status in ('run', 'finalizing', 'done', 'retry'): if pipeline_record.next_retry_time is not None: output['startTimeMs'] = _get_timestamp_ms(pipeline_record.next_retry_time) elif start_barrier: # start_barrier will be missing for root pipelines output['startTimeMs'] = _get_timestamp_ms(start_barrier.trigger_time) elif pipeline_record.start_time: # Assume this pipeline ran immediately upon spawning with no # start barrier or it's the root pipeline. output['startTimeMs'] = _get_timestamp_ms(pipeline_record.start_time) if status in ('finalizing',): output['endTimeMs'] = _get_timestamp_ms(default_slot.fill_time) if status in ('done',): output['endTimeMs'] = _get_timestamp_ms(pipeline_record.finalized_time) if pipeline_record.next_retry_time is not None: output['lastRetryMessage'] = pipeline_record.retry_message if pipeline_record.abort_message: output['abortMessage'] = pipeline_record.abort_message return output
Gets the UI dictionary of a pipeline from a set of status dictionaries. Args: pipeline_key: The key of the pipeline to lookup. pipeline_dict: Dictionary mapping pipeline db.Key to _PipelineRecord. Default is an empty dictionary. slot_dict: Dictionary mapping slot db.Key to _SlotRecord. Default is an empty dictionary. barrier_dict: Dictionary mapping barrier db.Key to _BarrierRecord. Default is an empty dictionary. status_dict: Dictionary mapping status record db.Key to _StatusRecord. Default is an empty dictionary. Returns: Dictionary with the keys: classPath: The pipeline function being run. args: List of positional argument slot dictionaries. kwargs: Dictionary of keyword argument slot dictionaries. outputs: Dictionary of output slot dictionaries. children: List of child pipeline IDs. queueName: Queue on which this pipeline is running. afterSlotKeys: List of Slot Ids after which this pipeline runs. currentAttempt: Number of the current attempt, starting at 1. maxAttempts: Maximum number of attempts before aborting. backoffSeconds: Constant factor for backoff before retrying. backoffFactor: Exponential factor for backoff before retrying. status: Current status of the pipeline. startTimeMs: When this pipeline ran or will run due to retries, if present. endTimeMs: When this pipeline finalized, if present. lastRetryMessage: Why the pipeline failed during the last retry, if there was a failure; may be empty. abortMessage: For root pipelines, why the pipeline was aborted if it was aborted; may be empty. Dictionary will contain these keys if explicit status is set: statusTimeMs: When the status was set as milliseconds since the epoch. statusMessage: Status message, if present. statusConsoleUrl: The relative URL for the console of this pipeline. statusLinks: Dictionary mapping human-readable names to relative URLs for related URLs to this pipeline. Raises: PipelineStatusError if any input is bad.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2891-L3056
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_get_internal_slot
def _get_internal_slot(slot_key=None, filler_pipeline_key=None, slot_dict=None): """Gets information about a _SlotRecord for display in UI. Args: slot_key: The db.Key of the slot to fetch. filler_pipeline_key: In the case the slot has not yet been filled, assume that the given db.Key (for a _PipelineRecord) will be the filler of the slot in the future. slot_dict: The slot JSON dictionary. Returns: Dictionary with the keys: status: Slot status: 'filled' or 'waiting' fillTimeMs: Time in milliseconds since the epoch of when it was filled. value: The current value of the slot, which is a slot's JSON dictionary. fillerPipelineId: The pipeline ID of what stage has or should fill this slot. Raises: PipelineStatusError if any input is bad. """ if slot_dict is None: slot_dict = {} slot_record = slot_dict.get(slot_key) if slot_record is None: raise PipelineStatusError( 'Could not find data for output slot key "%s".' % slot_key) output = {} if slot_record.status == _SlotRecord.FILLED: output['status'] = 'filled' output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time) output['value'] = slot_record.value filler_pipeline_key = ( _SlotRecord.filler.get_value_for_datastore(slot_record)) else: output['status'] = 'waiting' if filler_pipeline_key: output['fillerPipelineId'] = filler_pipeline_key.name() return output
python
def _get_internal_slot(slot_key=None, filler_pipeline_key=None, slot_dict=None): """Gets information about a _SlotRecord for display in UI. Args: slot_key: The db.Key of the slot to fetch. filler_pipeline_key: In the case the slot has not yet been filled, assume that the given db.Key (for a _PipelineRecord) will be the filler of the slot in the future. slot_dict: The slot JSON dictionary. Returns: Dictionary with the keys: status: Slot status: 'filled' or 'waiting' fillTimeMs: Time in milliseconds since the epoch of when it was filled. value: The current value of the slot, which is a slot's JSON dictionary. fillerPipelineId: The pipeline ID of what stage has or should fill this slot. Raises: PipelineStatusError if any input is bad. """ if slot_dict is None: slot_dict = {} slot_record = slot_dict.get(slot_key) if slot_record is None: raise PipelineStatusError( 'Could not find data for output slot key "%s".' % slot_key) output = {} if slot_record.status == _SlotRecord.FILLED: output['status'] = 'filled' output['fillTimeMs'] = _get_timestamp_ms(slot_record.fill_time) output['value'] = slot_record.value filler_pipeline_key = ( _SlotRecord.filler.get_value_for_datastore(slot_record)) else: output['status'] = 'waiting' if filler_pipeline_key: output['fillerPipelineId'] = filler_pipeline_key.name() return output
Gets information about a _SlotRecord for display in UI. Args: slot_key: The db.Key of the slot to fetch. filler_pipeline_key: In the case the slot has not yet been filled, assume that the given db.Key (for a _PipelineRecord) will be the filler of the slot in the future. slot_dict: The slot JSON dictionary. Returns: Dictionary with the keys: status: Slot status: 'filled' or 'waiting' fillTimeMs: Time in milliseconds since the epoch of when it was filled. value: The current value of the slot, which is a slot's JSON dictionary. fillerPipelineId: The pipeline ID of what stage has or should fill this slot. Raises: PipelineStatusError if any input is bad.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L3059-L3103
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
get_status_tree
def get_status_tree(root_pipeline_id): """Gets the full status tree of a pipeline. Args: root_pipeline_id: The pipeline ID to get status for. Returns: Dictionary with the keys: rootPipelineId: The ID of the root pipeline. slots: Mapping of slot IDs to result of from _get_internal_slot. pipelines: Mapping of pipeline IDs to result of _get_internal_status. Raises: PipelineStatusError if any input is bad. """ root_pipeline_key = db.Key.from_path(_PipelineRecord.kind(), root_pipeline_id) root_pipeline_record = db.get(root_pipeline_key) if root_pipeline_record is None: raise PipelineStatusError( 'Could not find pipeline ID "%s"' % root_pipeline_id) # If the supplied root_pipeline_id is not actually the root pipeline that's # okay. We'll find the real root and override the value they passed in. actual_root_key = _PipelineRecord.root_pipeline.get_value_for_datastore( root_pipeline_record) if actual_root_key != root_pipeline_key: root_pipeline_key = actual_root_key root_pipeline_id = root_pipeline_key.id_or_name() root_pipeline_record = db.get(root_pipeline_key) if not root_pipeline_record: raise PipelineStatusError( 'Could not find pipeline ID "%s"' % root_pipeline_id) # Run all queries asynchronously. queries = {} for model in (_PipelineRecord, _SlotRecord, _BarrierRecord, _StatusRecord): queries[model] = model.all().filter( 'root_pipeline =', root_pipeline_key).run(batch_size=1000) found_pipeline_dict = dict( (stage.key(), stage) for stage in queries[_PipelineRecord]) found_slot_dict = dict( (slot.key(), slot) for slot in queries[_SlotRecord]) found_barrier_dict = dict( (barrier.key(), barrier) for barrier in queries[_BarrierRecord]) found_status_dict = dict( (status.key(), status) for status in queries[_StatusRecord]) # Breadth-first traversal of _PipelineRecord instances by following # _PipelineRecord.fanned_out property values. valid_pipeline_keys = set([root_pipeline_key]) slot_filler_dict = {} # slot_key to pipeline_key expand_stack = [root_pipeline_record] while expand_stack: old_stack = expand_stack expand_stack = [] for pipeline_record in old_stack: for child_pipeline_key in pipeline_record.fanned_out: # This will let us prune off those pipelines which were allocated in # the Datastore but were never run due to mid-flight task failures. child_pipeline_record = found_pipeline_dict.get(child_pipeline_key) if child_pipeline_record is None: raise PipelineStatusError( 'Pipeline ID "%s" points to child ID "%s" which does not exist.' % (pipeline_record.key().name(), child_pipeline_key.name())) expand_stack.append(child_pipeline_record) valid_pipeline_keys.add(child_pipeline_key) # Figure out the deepest pipeline that's responsible for outputting to # a particular _SlotRecord, so we can report which pipeline *should* # be the filler. 
child_outputs = child_pipeline_record.params['output_slots'] for output_slot_key in child_outputs.itervalues(): slot_filler_dict[db.Key(output_slot_key)] = child_pipeline_key output = { 'rootPipelineId': root_pipeline_id, 'slots': {}, 'pipelines': {}, } for pipeline_key in found_pipeline_dict.keys(): if pipeline_key not in valid_pipeline_keys: continue output['pipelines'][pipeline_key.name()] = _get_internal_status( pipeline_key=pipeline_key, pipeline_dict=found_pipeline_dict, slot_dict=found_slot_dict, barrier_dict=found_barrier_dict, status_dict=found_status_dict) for slot_key, filler_pipeline_key in slot_filler_dict.iteritems(): output['slots'][str(slot_key)] = _get_internal_slot( slot_key=slot_key, filler_pipeline_key=filler_pipeline_key, slot_dict=found_slot_dict) return output
python
def get_status_tree(root_pipeline_id): """Gets the full status tree of a pipeline. Args: root_pipeline_id: The pipeline ID to get status for. Returns: Dictionary with the keys: rootPipelineId: The ID of the root pipeline. slots: Mapping of slot IDs to result of from _get_internal_slot. pipelines: Mapping of pipeline IDs to result of _get_internal_status. Raises: PipelineStatusError if any input is bad. """ root_pipeline_key = db.Key.from_path(_PipelineRecord.kind(), root_pipeline_id) root_pipeline_record = db.get(root_pipeline_key) if root_pipeline_record is None: raise PipelineStatusError( 'Could not find pipeline ID "%s"' % root_pipeline_id) # If the supplied root_pipeline_id is not actually the root pipeline that's # okay. We'll find the real root and override the value they passed in. actual_root_key = _PipelineRecord.root_pipeline.get_value_for_datastore( root_pipeline_record) if actual_root_key != root_pipeline_key: root_pipeline_key = actual_root_key root_pipeline_id = root_pipeline_key.id_or_name() root_pipeline_record = db.get(root_pipeline_key) if not root_pipeline_record: raise PipelineStatusError( 'Could not find pipeline ID "%s"' % root_pipeline_id) # Run all queries asynchronously. queries = {} for model in (_PipelineRecord, _SlotRecord, _BarrierRecord, _StatusRecord): queries[model] = model.all().filter( 'root_pipeline =', root_pipeline_key).run(batch_size=1000) found_pipeline_dict = dict( (stage.key(), stage) for stage in queries[_PipelineRecord]) found_slot_dict = dict( (slot.key(), slot) for slot in queries[_SlotRecord]) found_barrier_dict = dict( (barrier.key(), barrier) for barrier in queries[_BarrierRecord]) found_status_dict = dict( (status.key(), status) for status in queries[_StatusRecord]) # Breadth-first traversal of _PipelineRecord instances by following # _PipelineRecord.fanned_out property values. valid_pipeline_keys = set([root_pipeline_key]) slot_filler_dict = {} # slot_key to pipeline_key expand_stack = [root_pipeline_record] while expand_stack: old_stack = expand_stack expand_stack = [] for pipeline_record in old_stack: for child_pipeline_key in pipeline_record.fanned_out: # This will let us prune off those pipelines which were allocated in # the Datastore but were never run due to mid-flight task failures. child_pipeline_record = found_pipeline_dict.get(child_pipeline_key) if child_pipeline_record is None: raise PipelineStatusError( 'Pipeline ID "%s" points to child ID "%s" which does not exist.' % (pipeline_record.key().name(), child_pipeline_key.name())) expand_stack.append(child_pipeline_record) valid_pipeline_keys.add(child_pipeline_key) # Figure out the deepest pipeline that's responsible for outputting to # a particular _SlotRecord, so we can report which pipeline *should* # be the filler. 
child_outputs = child_pipeline_record.params['output_slots'] for output_slot_key in child_outputs.itervalues(): slot_filler_dict[db.Key(output_slot_key)] = child_pipeline_key output = { 'rootPipelineId': root_pipeline_id, 'slots': {}, 'pipelines': {}, } for pipeline_key in found_pipeline_dict.keys(): if pipeline_key not in valid_pipeline_keys: continue output['pipelines'][pipeline_key.name()] = _get_internal_status( pipeline_key=pipeline_key, pipeline_dict=found_pipeline_dict, slot_dict=found_slot_dict, barrier_dict=found_barrier_dict, status_dict=found_status_dict) for slot_key, filler_pipeline_key in slot_filler_dict.iteritems(): output['slots'][str(slot_key)] = _get_internal_slot( slot_key=slot_key, filler_pipeline_key=filler_pipeline_key, slot_dict=found_slot_dict) return output
Gets the full status tree of a pipeline. Args: root_pipeline_id: The pipeline ID to get status for. Returns: Dictionary with the keys: rootPipelineId: The ID of the root pipeline. slots: Mapping of slot IDs to the result of _get_internal_slot. pipelines: Mapping of pipeline IDs to the result of _get_internal_status. Raises: PipelineStatusError if any input is bad.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L3106-L3203
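The core of get_status_tree is a breadth-first expansion over fanned_out children; the in-memory pipelines dict below stands in for the Datastore records and is purely illustrative.

pipelines = {
    "root": {"fanned_out": ["a", "b"]},
    "a": {"fanned_out": ["c"]},
    "b": {"fanned_out": []},
    "c": {"fanned_out": []},
}

valid_keys = set(["root"])
expand_stack = ["root"]
while expand_stack:
  old_stack, expand_stack = expand_stack, []
  for pipeline_id in old_stack:
    for child in pipelines[pipeline_id]["fanned_out"]:
      valid_keys.add(child)
      expand_stack.append(child)

print(sorted(valid_keys))  # ['a', 'b', 'c', 'root']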
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
get_pipeline_names
def get_pipeline_names(): """Returns the class paths of all Pipelines defined in alphabetical order.""" class_path_set = set() for cls in _PipelineMeta._all_classes: if cls.class_path is not None: class_path_set.add(cls.class_path) return sorted(class_path_set)
python
def get_pipeline_names(): """Returns the class paths of all Pipelines defined in alphabetical order.""" class_path_set = set() for cls in _PipelineMeta._all_classes: if cls.class_path is not None: class_path_set.add(cls.class_path) return sorted(class_path_set)
Returns the class paths of all defined Pipelines, in alphabetical order.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L3206-L3212
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
get_root_list
def get_root_list(class_path=None, cursor=None, count=50): """Gets a list root Pipelines. Args: class_path: Optional. If supplied, only return root Pipelines with the given class_path. By default all root pipelines are returned. cursor: Optional. When supplied, the cursor returned from the last call to get_root_list which indicates where to pick up. count: How many pipeline returns to return. Returns: Dictionary with the keys: pipelines: The list of Pipeline records in the same format as returned by get_status_tree, but with only the roots listed. cursor: Cursor to pass back to this function to resume the query. Will only be present if there is another page of results. Raises: PipelineStatusError if any input is bad. """ query = _PipelineRecord.all(cursor=cursor) if class_path: query.filter('class_path =', class_path) query.filter('is_root_pipeline =', True) query.order('-start_time') root_list = query.fetch(count) fetch_list = [] for pipeline_record in root_list: fetch_list.append(db.Key(pipeline_record.params['output_slots']['default'])) fetch_list.append(db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_record.key())) fetch_list.append(db.Key.from_path( _StatusRecord.kind(), pipeline_record.key().name())) pipeline_dict = dict((stage.key(), stage) for stage in root_list) slot_dict = {} barrier_dict = {} status_dict = {} for entity in db.get(fetch_list): if isinstance(entity, _BarrierRecord): barrier_dict[entity.key()] = entity elif isinstance(entity, _SlotRecord): slot_dict[entity.key()] = entity elif isinstance(entity, _StatusRecord): status_dict[entity.key()] = entity results = [] for pipeline_record in root_list: try: output = _get_internal_status( pipeline_record.key(), pipeline_dict=pipeline_dict, slot_dict=slot_dict, barrier_dict=barrier_dict, status_dict=status_dict) output['pipelineId'] = pipeline_record.key().name() results.append(output) except PipelineStatusError, e: output = {'status': e.message} output['classPath'] = '' output['pipelineId'] = pipeline_record.key().name() results.append(output) result_dict = {} cursor = query.cursor() query.with_cursor(cursor) if query.get(keys_only=True): result_dict.update(cursor=cursor) result_dict.update(pipelines=results) return result_dict
python
def get_root_list(class_path=None, cursor=None, count=50): """Gets a list root Pipelines. Args: class_path: Optional. If supplied, only return root Pipelines with the given class_path. By default all root pipelines are returned. cursor: Optional. When supplied, the cursor returned from the last call to get_root_list which indicates where to pick up. count: How many pipeline returns to return. Returns: Dictionary with the keys: pipelines: The list of Pipeline records in the same format as returned by get_status_tree, but with only the roots listed. cursor: Cursor to pass back to this function to resume the query. Will only be present if there is another page of results. Raises: PipelineStatusError if any input is bad. """ query = _PipelineRecord.all(cursor=cursor) if class_path: query.filter('class_path =', class_path) query.filter('is_root_pipeline =', True) query.order('-start_time') root_list = query.fetch(count) fetch_list = [] for pipeline_record in root_list: fetch_list.append(db.Key(pipeline_record.params['output_slots']['default'])) fetch_list.append(db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_record.key())) fetch_list.append(db.Key.from_path( _StatusRecord.kind(), pipeline_record.key().name())) pipeline_dict = dict((stage.key(), stage) for stage in root_list) slot_dict = {} barrier_dict = {} status_dict = {} for entity in db.get(fetch_list): if isinstance(entity, _BarrierRecord): barrier_dict[entity.key()] = entity elif isinstance(entity, _SlotRecord): slot_dict[entity.key()] = entity elif isinstance(entity, _StatusRecord): status_dict[entity.key()] = entity results = [] for pipeline_record in root_list: try: output = _get_internal_status( pipeline_record.key(), pipeline_dict=pipeline_dict, slot_dict=slot_dict, barrier_dict=barrier_dict, status_dict=status_dict) output['pipelineId'] = pipeline_record.key().name() results.append(output) except PipelineStatusError, e: output = {'status': e.message} output['classPath'] = '' output['pipelineId'] = pipeline_record.key().name() results.append(output) result_dict = {} cursor = query.cursor() query.with_cursor(cursor) if query.get(keys_only=True): result_dict.update(cursor=cursor) result_dict.update(pipelines=results) return result_dict
Gets a list of root Pipelines. Args: class_path: Optional. If supplied, only return root Pipelines with the given class_path. By default all root pipelines are returned. cursor: Optional. When supplied, the cursor returned from the last call to get_root_list, which indicates where to pick up. count: How many pipelines to return. Returns: Dictionary with the keys: pipelines: The list of Pipeline records in the same format as returned by get_status_tree, but with only the roots listed. cursor: Cursor to pass back to this function to resume the query. Will only be present if there is another page of results. Raises: PipelineStatusError if any input is bad.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L3215-L3287
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
create_handlers_map
def create_handlers_map(prefix='.*'): """Create new handlers map. Args: prefix: url prefix to use. Returns: list of (regexp, handler) pairs for WSGIApplication constructor. """ return [ (prefix + '/output', _BarrierHandler), (prefix + '/run', _PipelineHandler), (prefix + '/finalized', _PipelineHandler), (prefix + '/cleanup', _CleanupHandler), (prefix + '/abort', _PipelineHandler), (prefix + '/fanout', _FanoutHandler), (prefix + '/fanout_abort', _FanoutAbortHandler), (prefix + '/callback', _CallbackHandler), (prefix + '/rpc/tree', status_ui._TreeStatusHandler), (prefix + '/rpc/class_paths', status_ui._ClassPathListHandler), (prefix + '/rpc/list', status_ui._RootListHandler), (prefix + '(/.+)', status_ui._StatusUiHandler), ]
python
def create_handlers_map(prefix='.*'): """Create new handlers map. Args: prefix: url prefix to use. Returns: list of (regexp, handler) pairs for WSGIApplication constructor. """ return [ (prefix + '/output', _BarrierHandler), (prefix + '/run', _PipelineHandler), (prefix + '/finalized', _PipelineHandler), (prefix + '/cleanup', _CleanupHandler), (prefix + '/abort', _PipelineHandler), (prefix + '/fanout', _FanoutHandler), (prefix + '/fanout_abort', _FanoutAbortHandler), (prefix + '/callback', _CallbackHandler), (prefix + '/rpc/tree', status_ui._TreeStatusHandler), (prefix + '/rpc/class_paths', status_ui._ClassPathListHandler), (prefix + '/rpc/list', status_ui._RootListHandler), (prefix + '(/.+)', status_ui._StatusUiHandler), ]
Create new handlers map. Args: prefix: url prefix to use. Returns: list of (regexp, handler) pairs for WSGIApplication constructor.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L3303-L3325
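create_handlers_map is designed to be handed straight to a WSGI application mounted at the pipeline base path. A sketch assuming webapp2 (the stock App Engine Python 2 framework) and the default '/_ah/pipeline' mount point used by Pipeline.start; the corresponding app.yaml entry routing '/_ah/pipeline(/.*)?' to this module is assumed, not shown.

import webapp2
from pipeline import pipeline

# Every pipeline handler (run, fanout, callback, status UI, ...) mounted
# under the same prefix that Pipeline.start() uses by default.
_APP = webapp2.WSGIApplication(
    pipeline.create_handlers_map(prefix='/_ah/pipeline'), debug=True)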
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Slot.value
def value(self): """Returns the current value of this slot. Returns: The value of the slot (a serializable Python type). Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._value
python
def value(self): """Returns the current value of this slot. Returns: The value of the slot (a serializable Python type). Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._value
Returns the current value of this slot. Returns: The value of the slot (a serializable Python type). Raises: SlotNotFilledError if the value hasn't been filled yet.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L203-L215
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Slot.filler
def filler(self): """Returns the pipeline ID that filled this slot's value. Returns: A string that is the pipeline ID. Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._filler_pipeline_key.name()
python
def filler(self): """Returns the pipeline ID that filled this slot's value. Returns: A string that is the pipeline ID. Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._filler_pipeline_key.name()
Returns the pipeline ID that filled this slot's value. Returns: A string that is the pipeline ID. Raises: SlotNotFilledError if the value hasn't been filled yet.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L218-L230
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Slot.fill_datetime
def fill_datetime(self): """Returns when the slot was filled. Returns: A datetime.datetime. Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._fill_datetime
python
def fill_datetime(self): """Returns when the slot was filled. Returns: A datetime.datetime. Raises: SlotNotFilledError if the value hasn't been filled yet. """ if not self.filled: raise SlotNotFilledError('Slot with name "%s", key "%s" not yet filled.' % (self.name, self.key)) return self._fill_datetime
Returns when the slot was filled. Returns: A datetime.datetime. Raises: SlotNotFilledError if the value hasn't been filled yet.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L233-L245
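Slot.value, Slot.filler and Slot.fill_datetime are only meaningful once the slot's filled flag is True, which is why each raises SlotNotFilledError otherwise. A sketch of reading all three off a completed pipeline looked up with Pipeline.from_id (shown further below); the pipeline ID is hypothetical.

from pipeline import pipeline

stage = pipeline.Pipeline.from_id('0123456789abcdef')  # hypothetical ID
if stage is not None and stage.outputs.default.filled:
    slot = stage.outputs.default
    print 'value:     %r' % slot.value
    print 'filled by: %s' % slot.filler         # pipeline ID that filled it
    print 'filled at: %s' % slot.fill_datetime  # datetime.datetime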
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Slot._set_value
def _set_value(self, slot_record): """Sets the value of this slot based on its corresponding _SlotRecord. Does nothing if the slot has not yet been filled. Args: slot_record: The _SlotRecord containing this Slot's value. """ if slot_record.status == _SlotRecord.FILLED: self.filled = True self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore( slot_record) self._fill_datetime = slot_record.fill_time self._value = slot_record.value
python
def _set_value(self, slot_record): """Sets the value of this slot based on its corresponding _SlotRecord. Does nothing if the slot has not yet been filled. Args: slot_record: The _SlotRecord containing this Slot's value. """ if slot_record.status == _SlotRecord.FILLED: self.filled = True self._filler_pipeline_key = _SlotRecord.filler.get_value_for_datastore( slot_record) self._fill_datetime = slot_record.fill_time self._value = slot_record.value
Sets the value of this slot based on its corresponding _SlotRecord. Does nothing if the slot has not yet been filled. Args: slot_record: The _SlotRecord containing this Slot's value.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L247-L260
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
PipelineFuture._inherit_outputs
def _inherit_outputs(self, pipeline_name, already_defined, resolve_outputs=False): """Inherits outputs from a calling Pipeline. Args: pipeline_name: The Pipeline class name (used for debugging). already_defined: Maps output name to stringified db.Key (of _SlotRecords) of any exiting output slots to be inherited by this future. resolve_outputs: When True, this method will dereference all output slots before returning back to the caller, making those output slots' values available. Raises: UnexpectedPipelineError when resolve_outputs is True and any of the output slots could not be retrived from the Datastore. """ for name, slot_key in already_defined.iteritems(): if not isinstance(slot_key, db.Key): slot_key = db.Key(slot_key) slot = self._output_dict.get(name) if slot is None: if self._strict: raise UnexpectedPipelineError( 'Inherited output named "%s" must be filled but ' 'not declared for pipeline class "%s"' % (name, pipeline_name)) else: self._output_dict[name] = Slot(name=name, slot_key=slot_key) else: slot.key = slot_key slot._exists = True if resolve_outputs: slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues()) all_slots = db.get(slot_key_dict.keys()) for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots): if slot_record is None: raise UnexpectedPipelineError( 'Inherited output named "%s" for pipeline class "%s" is ' 'missing its Slot in the datastore: "%s"' % (slot.name, pipeline_name, slot.key)) slot = slot_key_dict[slot_record.key()] slot._set_value(slot_record)
python
def _inherit_outputs(self, pipeline_name, already_defined, resolve_outputs=False): """Inherits outputs from a calling Pipeline. Args: pipeline_name: The Pipeline class name (used for debugging). already_defined: Maps output name to stringified db.Key (of _SlotRecords) of any exiting output slots to be inherited by this future. resolve_outputs: When True, this method will dereference all output slots before returning back to the caller, making those output slots' values available. Raises: UnexpectedPipelineError when resolve_outputs is True and any of the output slots could not be retrived from the Datastore. """ for name, slot_key in already_defined.iteritems(): if not isinstance(slot_key, db.Key): slot_key = db.Key(slot_key) slot = self._output_dict.get(name) if slot is None: if self._strict: raise UnexpectedPipelineError( 'Inherited output named "%s" must be filled but ' 'not declared for pipeline class "%s"' % (name, pipeline_name)) else: self._output_dict[name] = Slot(name=name, slot_key=slot_key) else: slot.key = slot_key slot._exists = True if resolve_outputs: slot_key_dict = dict((s.key, s) for s in self._output_dict.itervalues()) all_slots = db.get(slot_key_dict.keys()) for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots): if slot_record is None: raise UnexpectedPipelineError( 'Inherited output named "%s" for pipeline class "%s" is ' 'missing its Slot in the datastore: "%s"' % (slot.name, pipeline_name, slot.key)) slot = slot_key_dict[slot_record.key()] slot._set_value(slot_record)
Inherits outputs from a calling Pipeline. Args: pipeline_name: The Pipeline class name (used for debugging). already_defined: Maps output name to stringified db.Key (of _SlotRecords) of any existing output slots to be inherited by this future. resolve_outputs: When True, this method will dereference all output slots before returning back to the caller, making those output slots' values available. Raises: UnexpectedPipelineError when resolve_outputs is True and any of the output slots could not be retrieved from the Datastore.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L314-L358
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.from_id
def from_id(cls, pipeline_id, resolve_outputs=True, _pipeline_record=None): """Returns an instance corresponding to an existing Pipeline. The returned object will have the same properties a Pipeline does while it's running synchronously (e.g., like what it's first allocated), allowing callers to inspect caller arguments, outputs, fill slots, complete the pipeline, abort, retry, etc. Args: pipeline_id: The ID of this pipeline (a string). resolve_outputs: When True, dereference the outputs of this Pipeline so their values can be accessed by the caller. _pipeline_record: Internal-only. The _PipelineRecord instance to use to instantiate this instance instead of fetching it from the datastore. Returns: Pipeline sub-class instances or None if it could not be found. """ pipeline_record = _pipeline_record # Support pipeline IDs and idempotence_keys that are not unicode. if not isinstance(pipeline_id, unicode): try: pipeline_id = pipeline_id.encode('utf-8') except UnicodeDecodeError: pipeline_id = hashlib.sha1(pipeline_id).hexdigest() pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id) if pipeline_record is None: pipeline_record = db.get(pipeline_key) if pipeline_record is None: return None try: pipeline_func_class = mr_util.for_name(pipeline_record.class_path) except ImportError, e: logging.warning('Tried to find Pipeline %s#%s, but class could ' 'not be found. Using default Pipeline class instead.', pipeline_record.class_path, pipeline_id) pipeline_func_class = cls params = pipeline_record.params arg_list, kwarg_dict = _dereference_args( pipeline_record.class_path, params['args'], params['kwargs']) outputs = PipelineFuture(pipeline_func_class.output_names) outputs._inherit_outputs( pipeline_record.class_path, params['output_slots'], resolve_outputs=resolve_outputs) stage = pipeline_func_class(*arg_list, **kwarg_dict) stage.backoff_seconds = params['backoff_seconds'] stage.backoff_factor = params['backoff_factor'] stage.max_attempts = params['max_attempts'] stage.task_retry = params['task_retry'] stage.target = params.get('target') # May not be defined for old Pipelines stage._current_attempt = pipeline_record.current_attempt stage._set_values_internal( _PipelineContext('', params['queue_name'], params['base_path']), pipeline_key, _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record), outputs, pipeline_record.status) return stage
python
def from_id(cls, pipeline_id, resolve_outputs=True, _pipeline_record=None): """Returns an instance corresponding to an existing Pipeline. The returned object will have the same properties a Pipeline does while it's running synchronously (e.g., like what it's first allocated), allowing callers to inspect caller arguments, outputs, fill slots, complete the pipeline, abort, retry, etc. Args: pipeline_id: The ID of this pipeline (a string). resolve_outputs: When True, dereference the outputs of this Pipeline so their values can be accessed by the caller. _pipeline_record: Internal-only. The _PipelineRecord instance to use to instantiate this instance instead of fetching it from the datastore. Returns: Pipeline sub-class instances or None if it could not be found. """ pipeline_record = _pipeline_record # Support pipeline IDs and idempotence_keys that are not unicode. if not isinstance(pipeline_id, unicode): try: pipeline_id = pipeline_id.encode('utf-8') except UnicodeDecodeError: pipeline_id = hashlib.sha1(pipeline_id).hexdigest() pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id) if pipeline_record is None: pipeline_record = db.get(pipeline_key) if pipeline_record is None: return None try: pipeline_func_class = mr_util.for_name(pipeline_record.class_path) except ImportError, e: logging.warning('Tried to find Pipeline %s#%s, but class could ' 'not be found. Using default Pipeline class instead.', pipeline_record.class_path, pipeline_id) pipeline_func_class = cls params = pipeline_record.params arg_list, kwarg_dict = _dereference_args( pipeline_record.class_path, params['args'], params['kwargs']) outputs = PipelineFuture(pipeline_func_class.output_names) outputs._inherit_outputs( pipeline_record.class_path, params['output_slots'], resolve_outputs=resolve_outputs) stage = pipeline_func_class(*arg_list, **kwarg_dict) stage.backoff_seconds = params['backoff_seconds'] stage.backoff_factor = params['backoff_factor'] stage.max_attempts = params['max_attempts'] stage.task_retry = params['task_retry'] stage.target = params.get('target') # May not be defined for old Pipelines stage._current_attempt = pipeline_record.current_attempt stage._set_values_internal( _PipelineContext('', params['queue_name'], params['base_path']), pipeline_key, _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record), outputs, pipeline_record.status) return stage
Returns an instance corresponding to an existing Pipeline. The returned object will have the same properties a Pipeline does while it's running synchronously (e.g., like when it was first allocated), allowing callers to inspect caller arguments, outputs, fill slots, complete the pipeline, abort, retry, etc. Args: pipeline_id: The ID of this pipeline (a string). resolve_outputs: When True, dereference the outputs of this Pipeline so their values can be accessed by the caller. _pipeline_record: Internal-only. The _PipelineRecord instance to use to instantiate this instance instead of fetching it from the datastore. Returns: A Pipeline sub-class instance, or None if it could not be found.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L544-L609
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.start
def start(self, idempotence_key='', queue_name='default', base_path='/_ah/pipeline', return_task=False, countdown=None, eta=None): """Starts a new instance of this pipeline. Args: idempotence_key: The ID to use for this Pipeline and throughout its asynchronous workflow to ensure the operations are idempotent. If empty a starting key will be automatically assigned. queue_name: What queue this Pipeline's workflow should execute on. base_path: The relative URL path to where the Pipeline API is mounted for access by the taskqueue API or external requests. return_task: When True, a task to start this pipeline will be returned instead of submitted, allowing the caller to start off this pipeline as part of a separate transaction (potentially leaving this newly allocated pipeline's datastore entities in place if that separate transaction fails for any reason). countdown: Time in seconds into the future that this Task should execute. Defaults to zero. eta: A datetime.datetime specifying the absolute time at which the task should be executed. Must not be specified if 'countdown' is specified. This may be timezone-aware or timezone-naive. If None, defaults to now. For pull tasks, no worker will be able to lease this task before the time indicated by eta. Returns: A taskqueue.Task instance if return_task was True. This task will *not* have a name, thus to ensure reliable execution of your pipeline you should add() this task as part of a separate Datastore transaction. Raises: PipelineExistsError if the pipeline with the given idempotence key exists. PipelineSetupError if the pipeline could not start for any other reason. """ if not idempotence_key: idempotence_key = uuid.uuid4().hex elif not isinstance(idempotence_key, unicode): try: idempotence_key.encode('utf-8') except UnicodeDecodeError: idempotence_key = hashlib.sha1(idempotence_key).hexdigest() pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key) context = _PipelineContext('', queue_name, base_path) future = PipelineFuture(self.output_names, force_strict=True) try: self._set_values_internal( context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING) return context.start( self, return_task=return_task, countdown=countdown, eta=eta) except Error: # Pass through exceptions that originate in this module. raise except Exception, e: # Re-type any exceptions that were raised in dependent methods. raise PipelineSetupError('Error starting %s#%s: %s' % ( self, idempotence_key, str(e)))
python
def start(self, idempotence_key='', queue_name='default', base_path='/_ah/pipeline', return_task=False, countdown=None, eta=None): """Starts a new instance of this pipeline. Args: idempotence_key: The ID to use for this Pipeline and throughout its asynchronous workflow to ensure the operations are idempotent. If empty a starting key will be automatically assigned. queue_name: What queue this Pipeline's workflow should execute on. base_path: The relative URL path to where the Pipeline API is mounted for access by the taskqueue API or external requests. return_task: When True, a task to start this pipeline will be returned instead of submitted, allowing the caller to start off this pipeline as part of a separate transaction (potentially leaving this newly allocated pipeline's datastore entities in place if that separate transaction fails for any reason). countdown: Time in seconds into the future that this Task should execute. Defaults to zero. eta: A datetime.datetime specifying the absolute time at which the task should be executed. Must not be specified if 'countdown' is specified. This may be timezone-aware or timezone-naive. If None, defaults to now. For pull tasks, no worker will be able to lease this task before the time indicated by eta. Returns: A taskqueue.Task instance if return_task was True. This task will *not* have a name, thus to ensure reliable execution of your pipeline you should add() this task as part of a separate Datastore transaction. Raises: PipelineExistsError if the pipeline with the given idempotence key exists. PipelineSetupError if the pipeline could not start for any other reason. """ if not idempotence_key: idempotence_key = uuid.uuid4().hex elif not isinstance(idempotence_key, unicode): try: idempotence_key.encode('utf-8') except UnicodeDecodeError: idempotence_key = hashlib.sha1(idempotence_key).hexdigest() pipeline_key = db.Key.from_path(_PipelineRecord.kind(), idempotence_key) context = _PipelineContext('', queue_name, base_path) future = PipelineFuture(self.output_names, force_strict=True) try: self._set_values_internal( context, pipeline_key, pipeline_key, future, _PipelineRecord.WAITING) return context.start( self, return_task=return_task, countdown=countdown, eta=eta) except Error: # Pass through exceptions that originate in this module. raise except Exception, e: # Re-type any exceptions that were raised in dependent methods. raise PipelineSetupError('Error starting %s#%s: %s' % ( self, idempotence_key, str(e)))
Starts a new instance of this pipeline. Args: idempotence_key: The ID to use for this Pipeline and throughout its asynchronous workflow to ensure the operations are idempotent. If empty a starting key will be automatically assigned. queue_name: What queue this Pipeline's workflow should execute on. base_path: The relative URL path to where the Pipeline API is mounted for access by the taskqueue API or external requests. return_task: When True, a task to start this pipeline will be returned instead of submitted, allowing the caller to start off this pipeline as part of a separate transaction (potentially leaving this newly allocated pipeline's datastore entities in place if that separate transaction fails for any reason). countdown: Time in seconds into the future that this Task should execute. Defaults to zero. eta: A datetime.datetime specifying the absolute time at which the task should be executed. Must not be specified if 'countdown' is specified. This may be timezone-aware or timezone-naive. If None, defaults to now. For pull tasks, no worker will be able to lease this task before the time indicated by eta. Returns: A taskqueue.Task instance if return_task was True. This task will *not* have a name, thus to ensure reliable execution of your pipeline you should add() this task as part of a separate Datastore transaction. Raises: PipelineExistsError if the pipeline with the given idempotence key exists. PipelineSetupError if the pipeline could not start for any other reason.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L613-L673
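A minimal end-to-end sketch of start(): define a synchronous pipeline, kick it off on a queue, and later re-load it by the ID that was assigned. Class and module names are illustrative only; the handlers from create_handlers_map must already be mounted at base_path for the enqueued tasks to run.

from pipeline import pipeline

class AddPipeline(pipeline.Pipeline):
    """Sums its two arguments; the return value fills the default slot."""

    def run(self, a, b):
        return a + b

stage = AddPipeline(2, 3)
stage.start(queue_name='default', base_path='/_ah/pipeline')
root_id = stage.pipeline_id  # keep this to poll for completion later

# Later, e.g. in a different request handler:
done = pipeline.Pipeline.from_id(root_id)
if done is not None and done.outputs.default.filled:
    print done.outputs.default.value  # -> 5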
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.retry
def retry(self, retry_message=''): """Forces a currently running asynchronous pipeline to retry. Note this may not be called by synchronous or generator pipelines. Those must instead raise the 'Retry' exception during execution. Args: retry_message: Optional message explaining why the retry happened. Returns: True if the Pipeline should be retried, False if it cannot be cancelled mid-flight for some reason. """ if not self.async: raise UnexpectedPipelineError( 'May only call retry() method for asynchronous pipelines.') if self.try_cancel(): self._context.transition_retry(self._pipeline_key, retry_message) return True else: return False
python
def retry(self, retry_message=''): """Forces a currently running asynchronous pipeline to retry. Note this may not be called by synchronous or generator pipelines. Those must instead raise the 'Retry' exception during execution. Args: retry_message: Optional message explaining why the retry happened. Returns: True if the Pipeline should be retried, False if it cannot be cancelled mid-flight for some reason. """ if not self.async: raise UnexpectedPipelineError( 'May only call retry() method for asynchronous pipelines.') if self.try_cancel(): self._context.transition_retry(self._pipeline_key, retry_message) return True else: return False
Forces a currently running asynchronous pipeline to retry. Note this may not be called by synchronous or generator pipelines. Those must instead raise the 'Retry' exception during execution. Args: retry_message: Optional message explaining why the retry happened. Returns: True if the Pipeline should be retried, False if it cannot be cancelled mid-flight for some reason.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L693-L713
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.abort
def abort(self, abort_message=''): """Mark the entire pipeline up to the root as aborted. Note this should only be called from *outside* the context of a running pipeline. Synchronous and generator pipelines should raise the 'Abort' exception to cause this behavior during execution. Args: abort_message: Optional message explaining why the abort happened. Returns: True if the abort signal was sent successfully; False if the pipeline could not be aborted for any reason. """ # TODO: Use thread-local variable to enforce that this is not called # while a pipeline is executing in the current thread. if (self.async and self._root_pipeline_key == self._pipeline_key and not self.try_cancel()): # Handle the special case where the root pipeline is async and thus # cannot be aborted outright. return False else: return self._context.begin_abort( self._root_pipeline_key, abort_message=abort_message)
python
def abort(self, abort_message=''): """Mark the entire pipeline up to the root as aborted. Note this should only be called from *outside* the context of a running pipeline. Synchronous and generator pipelines should raise the 'Abort' exception to cause this behavior during execution. Args: abort_message: Optional message explaining why the abort happened. Returns: True if the abort signal was sent successfully; False if the pipeline could not be aborted for any reason. """ # TODO: Use thread-local variable to enforce that this is not called # while a pipeline is executing in the current thread. if (self.async and self._root_pipeline_key == self._pipeline_key and not self.try_cancel()): # Handle the special case where the root pipeline is async and thus # cannot be aborted outright. return False else: return self._context.begin_abort( self._root_pipeline_key, abort_message=abort_message)
Mark the entire pipeline up to the root as aborted. Note this should only be called from *outside* the context of a running pipeline. Synchronous and generator pipelines should raise the 'Abort' exception to cause this behavior during execution. Args: abort_message: Optional message explaining why the abort happened. Returns: True if the abort signal was sent successfully; False if the pipeline could not be aborted for any reason.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L715-L738
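retry() and abort() are for control from outside the running pipeline (an admin handler, a cron job); code inside run() raises the Retry or Abort exceptions instead. A sketch of a best-effort cancel helper; the caller is assumed to supply the root pipeline ID.

from pipeline import pipeline

def cancel_job(pipeline_id, reason):
    """Ask a whole pipeline tree to abort; returns True if the signal was sent."""
    stage = pipeline.Pipeline.from_id(pipeline_id, resolve_outputs=False)
    if stage is None:
        return False
    return stage.abort(abort_message=reason)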
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.fill
def fill(self, name_or_slot, value): """Fills an output slot required by this Pipeline. Args: name_or_slot: The name of the slot (a string) or Slot record to fill. value: The serializable value to assign to this slot. Raises: UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError if trying to output to a slot that was not declared ahead of time. """ if isinstance(name_or_slot, basestring): slot = getattr(self.outputs, name_or_slot) elif isinstance(name_or_slot, Slot): slot = name_or_slot else: raise UnexpectedPipelineError( 'Could not fill invalid output name: %r' % name_or_slot) if not slot._exists: raise SlotNotDeclaredError( 'Cannot fill output with name "%s" that was just ' 'declared within the Pipeline context.' % slot.name) self._context.fill_slot(self._pipeline_key, slot, value)
python
def fill(self, name_or_slot, value): """Fills an output slot required by this Pipeline. Args: name_or_slot: The name of the slot (a string) or Slot record to fill. value: The serializable value to assign to this slot. Raises: UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError if trying to output to a slot that was not declared ahead of time. """ if isinstance(name_or_slot, basestring): slot = getattr(self.outputs, name_or_slot) elif isinstance(name_or_slot, Slot): slot = name_or_slot else: raise UnexpectedPipelineError( 'Could not fill invalid output name: %r' % name_or_slot) if not slot._exists: raise SlotNotDeclaredError( 'Cannot fill output with name "%s" that was just ' 'declared within the Pipeline context.' % slot.name) self._context.fill_slot(self._pipeline_key, slot, value)
Fills an output slot required by this Pipeline. Args: name_or_slot: The name of the slot (a string) or Slot record to fill. value: The serializable value to assign to this slot. Raises: UnexpectedPipelineError if the Slot no longer exists. SlotNotDeclaredError if trying to output to a slot that was not declared ahead of time.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L741-L765
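fill() writes to named output slots beyond 'default'; those names must be declared up front via the output_names class attribute, otherwise SlotNotDeclaredError is raised as described above. A sketch with illustrative names, showing both the string and Slot forms of the first argument.

from pipeline import pipeline

class SplitPipeline(pipeline.Pipeline):
    """Fills two declared output slots in addition to the default one."""

    output_names = ['evens', 'odds']

    def run(self, numbers):
        self.fill('evens', [n for n in numbers if n % 2 == 0])
        self.fill(self.outputs.odds, [n for n in numbers if n % 2])
        return len(numbers)  # return value fills the 'default' slot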
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.set_status
def set_status(self, message=None, console_url=None, status_links=None): """Sets the current status of this pipeline. This method is purposefully non-transactional. Updates are written to the datastore immediately and overwrite all existing statuses. Args: message: (optional) Overall status message. console_url: (optional) Relative URL to use for the "console" of this pipeline that displays current progress. When None, no console will be displayed. status_links: (optional) Dictionary of readable link names to relative URLs that should be associated with this pipeline as it runs. These links provide convenient access to other dashboards, consoles, etc associated with the pipeline. Raises: PipelineRuntimeError if the status could not be set for any reason. """ if _TEST_MODE: logging.info( 'New status for %s#%s: message=%r, console_url=%r, status_links=%r', self, self.pipeline_id, message, console_url, status_links) return status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id) root_pipeline_key = db.Key.from_path( _PipelineRecord.kind(), self.root_pipeline_id) status_record = _StatusRecord( key=status_key, root_pipeline=root_pipeline_key) try: if message: status_record.message = message if console_url: status_record.console_url = console_url if status_links: # Alphabeticalize the list. status_record.link_names = sorted( db.Text(s) for s in status_links.iterkeys()) status_record.link_urls = [ db.Text(status_links[name]) for name in status_record.link_names] status_record.status_time = datetime.datetime.utcnow() status_record.put() except Exception, e: raise PipelineRuntimeError('Could not set status for %s#%s: %s' % (self, self.pipeline_id, str(e)))
python
def set_status(self, message=None, console_url=None, status_links=None): """Sets the current status of this pipeline. This method is purposefully non-transactional. Updates are written to the datastore immediately and overwrite all existing statuses. Args: message: (optional) Overall status message. console_url: (optional) Relative URL to use for the "console" of this pipeline that displays current progress. When None, no console will be displayed. status_links: (optional) Dictionary of readable link names to relative URLs that should be associated with this pipeline as it runs. These links provide convenient access to other dashboards, consoles, etc associated with the pipeline. Raises: PipelineRuntimeError if the status could not be set for any reason. """ if _TEST_MODE: logging.info( 'New status for %s#%s: message=%r, console_url=%r, status_links=%r', self, self.pipeline_id, message, console_url, status_links) return status_key = db.Key.from_path(_StatusRecord.kind(), self.pipeline_id) root_pipeline_key = db.Key.from_path( _PipelineRecord.kind(), self.root_pipeline_id) status_record = _StatusRecord( key=status_key, root_pipeline=root_pipeline_key) try: if message: status_record.message = message if console_url: status_record.console_url = console_url if status_links: # Alphabeticalize the list. status_record.link_names = sorted( db.Text(s) for s in status_links.iterkeys()) status_record.link_urls = [ db.Text(status_links[name]) for name in status_record.link_names] status_record.status_time = datetime.datetime.utcnow() status_record.put() except Exception, e: raise PipelineRuntimeError('Could not set status for %s#%s: %s' % (self, self.pipeline_id, str(e)))
Sets the current status of this pipeline. This method is purposefully non-transactional. Updates are written to the datastore immediately and overwrite all existing statuses. Args: message: (optional) Overall status message. console_url: (optional) Relative URL to use for the "console" of this pipeline that displays current progress. When None, no console will be displayed. status_links: (optional) Dictionary of readable link names to relative URLs that should be associated with this pipeline as it runs. These links provide convenient access to other dashboards, consoles, etc associated with the pipeline. Raises: PipelineRuntimeError if the status could not be set for any reason.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L767-L815
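set_status() is a non-transactional progress report surfaced by the status UI; because each call overwrites the previous record wholesale, pass every field you want displayed on every call. A sketch inside a hypothetical long-running run(); the console and dashboard URLs are illustrative.

from pipeline import pipeline

class CrawlPipeline(pipeline.Pipeline):

    def run(self, urls):
        for i, url in enumerate(urls):
            self.set_status(
                message='Crawled %d of %d URLs' % (i, len(urls)),
                console_url='/crawl/console?root=%s' % self.root_pipeline_id,
                status_links={'Dashboard': '/crawl/dashboard'})
            # ... fetch and process url here ...
        return len(urls)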
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.complete
def complete(self, default_output=None): """Marks this asynchronous Pipeline as complete. Args: default_output: What value the 'default' output slot should be assigned. Raises: UnexpectedPipelineError if the slot no longer exists or this method was called for a pipeline that is not async. """ # TODO: Enforce that all outputs expected by this async pipeline were # filled before this complete() function was called. May required all # async functions to declare their outputs upfront. if not self.async: raise UnexpectedPipelineError( 'May only call complete() method for asynchronous pipelines.') self._context.fill_slot( self._pipeline_key, self.outputs.default, default_output)
python
def complete(self, default_output=None): """Marks this asynchronous Pipeline as complete. Args: default_output: What value the 'default' output slot should be assigned. Raises: UnexpectedPipelineError if the slot no longer exists or this method was called for a pipeline that is not async. """ # TODO: Enforce that all outputs expected by this async pipeline were # filled before this complete() function was called. May required all # async functions to declare their outputs upfront. if not self.async: raise UnexpectedPipelineError( 'May only call complete() method for asynchronous pipelines.') self._context.fill_slot( self._pipeline_key, self.outputs.default, default_output)
Marks this asynchronous Pipeline as complete. Args: default_output: What value the 'default' output slot should be assigned. Raises: UnexpectedPipelineError if the slot no longer exists or this method was called for a pipeline that is not async.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L817-L834
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.get_callback_url
def get_callback_url(self, **kwargs): """Returns a relative URL for invoking this Pipeline's callback method. Args: kwargs: Dictionary mapping keyword argument names to single values that should be passed to the callback when it is invoked. Raises: UnexpectedPipelineError if this is invoked on pipeline that is not async. """ # TODO: Support positional parameters. if not self.async: raise UnexpectedPipelineError( 'May only call get_callback_url() method for asynchronous pipelines.') kwargs['pipeline_id'] = self._pipeline_key.name() params = urllib.urlencode(sorted(kwargs.items())) return '%s/callback?%s' % (self.base_path, params)
python
def get_callback_url(self, **kwargs): """Returns a relative URL for invoking this Pipeline's callback method. Args: kwargs: Dictionary mapping keyword argument names to single values that should be passed to the callback when it is invoked. Raises: UnexpectedPipelineError if this is invoked on pipeline that is not async. """ # TODO: Support positional parameters. if not self.async: raise UnexpectedPipelineError( 'May only call get_callback_url() method for asynchronous pipelines.') kwargs['pipeline_id'] = self._pipeline_key.name() params = urllib.urlencode(sorted(kwargs.items())) return '%s/callback?%s' % (self.base_path, params)
Returns a relative URL for invoking this Pipeline's callback method. Args: kwargs: Dictionary mapping keyword argument names to single values that should be passed to the callback when it is invoked. Raises: UnexpectedPipelineError if this is invoked on a pipeline that is not async.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L836-L852
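Asynchronous pipelines (async = True) typically hand the callback URL to some external system in run(), then finish later inside callback() by calling complete(). A sketch; the external approval service is entirely hypothetical, and urlfetch merely stands in for however the URL gets delivered.

import urllib

from google.appengine.api import urlfetch
from pipeline import pipeline

class WaitForApproval(pipeline.Pipeline):
    """Completes only when an external system hits the callback URL."""

    async = True

    def run(self, request_id):
        # Relative URL; an external caller needs it joined to this app's host.
        callback_path = self.get_callback_url(decision='unknown')
        register_url = ('https://approvals.example.com/register?' +
                        urllib.urlencode({'id': request_id,
                                          'callback': callback_path}))
        urlfetch.fetch(register_url)  # hypothetical external service

    def callback(self, decision='unknown'):
        self.complete(default_output=decision)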
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.get_callback_task
def get_callback_task(self, *args, **kwargs): """Returns a task for calling back this Pipeline. Args: params: Keyword argument containing a dictionary of key/value pairs that will be passed to the callback when it is executed. args, kwargs: Passed to the taskqueue.Task constructor. Use these arguments to set the task name (for idempotence), etc. Returns: A taskqueue.Task instance that must be enqueued by the caller. """ if not self.async: raise UnexpectedPipelineError( 'May only call get_callback_task() method for asynchronous pipelines.') params = kwargs.get('params', {}) kwargs['params'] = params params['pipeline_id'] = self._pipeline_key.name() kwargs['url'] = self.base_path + '/callback' kwargs['method'] = 'POST' return taskqueue.Task(*args, **kwargs)
python
def get_callback_task(self, *args, **kwargs): """Returns a task for calling back this Pipeline. Args: params: Keyword argument containing a dictionary of key/value pairs that will be passed to the callback when it is executed. args, kwargs: Passed to the taskqueue.Task constructor. Use these arguments to set the task name (for idempotence), etc. Returns: A taskqueue.Task instance that must be enqueued by the caller. """ if not self.async: raise UnexpectedPipelineError( 'May only call get_callback_task() method for asynchronous pipelines.') params = kwargs.get('params', {}) kwargs['params'] = params params['pipeline_id'] = self._pipeline_key.name() kwargs['url'] = self.base_path + '/callback' kwargs['method'] = 'POST' return taskqueue.Task(*args, **kwargs)
Returns a task for calling back this Pipeline. Args: params: Keyword argument containing a dictionary of key/value pairs that will be passed to the callback when it is executed. args, kwargs: Passed to the taskqueue.Task constructor. Use these arguments to set the task name (for idempotence), etc. Returns: A taskqueue.Task instance that must be enqueued by the caller.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L854-L875
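get_callback_task() builds, but does not enqueue, a taskqueue.Task aimed at the callback handler; giving the task a name makes re-enqueueing idempotent across retries. A sketch of an async pipeline that schedules a delayed callback to itself; the name scheme and timeout semantics are illustrative.

from google.appengine.api import taskqueue
from pipeline import pipeline

class TimeoutPipeline(pipeline.Pipeline):
    """Async pipeline that completes itself after a delay via a named task."""

    async = True

    def run(self, timeout_seconds):
        task = self.get_callback_task(
            name='timeout-%s-%d' % (self.pipeline_id, self.current_attempt),
            countdown=timeout_seconds,
            params=dict(reason='timeout'))
        try:
            task.add(self.queue_name)
        except (taskqueue.TaskAlreadyExistsError,
                taskqueue.TombstonedTaskError):
            pass  # a retry already enqueued it; the name de-dupes for us

    def callback(self, reason=''):
        self.complete(default_output=reason)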
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.send_result_email
def send_result_email(self, sender=None): """Sends an email to admins indicating this Pipeline has completed. For developer convenience. Automatically called from finalized for root Pipelines that do not override the default action. Args: sender: (optional) Override the sender's email address. """ status = 'successful' if self.was_aborted: status = 'aborted' app_id = os.environ['APPLICATION_ID'] shard_index = app_id.find('~') if shard_index != -1: app_id = app_id[shard_index+1:] param_dict = { 'status': status, 'app_id': app_id, 'class_path': self._class_path, 'pipeline_id': self.root_pipeline_id, 'base_path': '%s.appspot.com%s' % (app_id, self.base_path), } subject = ( 'Pipeline %(status)s: App "%(app_id)s", %(class_path)s' '#%(pipeline_id)s' % param_dict) body = """View the pipeline results here: http://%(base_path)s/status?root=%(pipeline_id)s Thanks, The Pipeline API """ % param_dict html = """<html><body> <p>View the pipeline results here:</p> <p><a href="http://%(base_path)s/status?root=%(pipeline_id)s" >http://%(base_path)s/status?root=%(pipeline_id)s</a></p> <p> Thanks, <br> The Pipeline API </p> </body></html> """ % param_dict if sender is None: sender = '%s@%s.appspotmail.com' % (app_id, app_id) try: self._send_mail(sender, subject, body, html=html) except (mail.InvalidSenderError, mail.InvalidEmailError): logging.warning('Could not send result email for ' 'root pipeline ID "%s" from sender "%s"', self.root_pipeline_id, sender)
python
def send_result_email(self, sender=None): """Sends an email to admins indicating this Pipeline has completed. For developer convenience. Automatically called from finalized for root Pipelines that do not override the default action. Args: sender: (optional) Override the sender's email address. """ status = 'successful' if self.was_aborted: status = 'aborted' app_id = os.environ['APPLICATION_ID'] shard_index = app_id.find('~') if shard_index != -1: app_id = app_id[shard_index+1:] param_dict = { 'status': status, 'app_id': app_id, 'class_path': self._class_path, 'pipeline_id': self.root_pipeline_id, 'base_path': '%s.appspot.com%s' % (app_id, self.base_path), } subject = ( 'Pipeline %(status)s: App "%(app_id)s", %(class_path)s' '#%(pipeline_id)s' % param_dict) body = """View the pipeline results here: http://%(base_path)s/status?root=%(pipeline_id)s Thanks, The Pipeline API """ % param_dict html = """<html><body> <p>View the pipeline results here:</p> <p><a href="http://%(base_path)s/status?root=%(pipeline_id)s" >http://%(base_path)s/status?root=%(pipeline_id)s</a></p> <p> Thanks, <br> The Pipeline API </p> </body></html> """ % param_dict if sender is None: sender = '%s@%s.appspotmail.com' % (app_id, app_id) try: self._send_mail(sender, subject, body, html=html) except (mail.InvalidSenderError, mail.InvalidEmailError): logging.warning('Could not send result email for ' 'root pipeline ID "%s" from sender "%s"', self.root_pipeline_id, sender)
Sends an email to admins indicating this Pipeline has completed. For developer convenience. Automatically called from finalized for root Pipelines that do not override the default action. Args: sender: (optional) Override the sender's email address.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L877-L935
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.cleanup
def cleanup(self): """Clean up this Pipeline and all Datastore records used for coordination. Only works when called on a root pipeline. Child pipelines will ignore calls to this method. After this method is called, Pipeline.from_id() and related status methods will return inconsistent or missing results. This method is fire-and-forget and asynchronous. """ if self._root_pipeline_key is None: raise UnexpectedPipelineError( 'Could not cleanup Pipeline with unknown root pipeline ID.') if not self.is_root: return task = taskqueue.Task( params=dict(root_pipeline_key=self._root_pipeline_key), url=self.base_path + '/cleanup', headers={'X-Ae-Pipeline-Key': self._root_pipeline_key}) taskqueue.Queue(self.queue_name).add(task)
python
def cleanup(self): """Clean up this Pipeline and all Datastore records used for coordination. Only works when called on a root pipeline. Child pipelines will ignore calls to this method. After this method is called, Pipeline.from_id() and related status methods will return inconsistent or missing results. This method is fire-and-forget and asynchronous. """ if self._root_pipeline_key is None: raise UnexpectedPipelineError( 'Could not cleanup Pipeline with unknown root pipeline ID.') if not self.is_root: return task = taskqueue.Task( params=dict(root_pipeline_key=self._root_pipeline_key), url=self.base_path + '/cleanup', headers={'X-Ae-Pipeline-Key': self._root_pipeline_key}) taskqueue.Queue(self.queue_name).add(task)
Clean up this Pipeline and all Datastore records used for coordination. Only works when called on a root pipeline. Child pipelines will ignore calls to this method. After this method is called, Pipeline.from_id() and related status methods will return inconsistent or missing results. This method is fire-and-forget and asynchronous.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L937-L956
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline.with_params
def with_params(self, **kwargs): """Modify various execution parameters of a Pipeline before it runs. This method has no effect in test mode. Args: kwargs: Attributes to modify on this Pipeline instance before it has been executed. Returns: This Pipeline instance, for easy chaining. """ if _TEST_MODE: logging.info( 'Setting runtime parameters for %s#%s: %r', self, self.pipeline_id, kwargs) return self if self.pipeline_id is not None: raise UnexpectedPipelineError( 'May only call with_params() on a Pipeline that has not yet ' 'been scheduled for execution.') ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target') for name, value in kwargs.iteritems(): if name not in ALLOWED: raise TypeError('Unexpected keyword: %s=%r' % (name, value)) setattr(self, name, value) return self
python
def with_params(self, **kwargs): """Modify various execution parameters of a Pipeline before it runs. This method has no effect in test mode. Args: kwargs: Attributes to modify on this Pipeline instance before it has been executed. Returns: This Pipeline instance, for easy chaining. """ if _TEST_MODE: logging.info( 'Setting runtime parameters for %s#%s: %r', self, self.pipeline_id, kwargs) return self if self.pipeline_id is not None: raise UnexpectedPipelineError( 'May only call with_params() on a Pipeline that has not yet ' 'been scheduled for execution.') ALLOWED = ('backoff_seconds', 'backoff_factor', 'max_attempts', 'target') for name, value in kwargs.iteritems(): if name not in ALLOWED: raise TypeError('Unexpected keyword: %s=%r' % (name, value)) setattr(self, name, value) return self
Modify various execution parameters of a Pipeline before it runs. This method has no effect in test mode. Args: kwargs: Attributes to modify on this Pipeline instance before it has been executed. Returns: This Pipeline instance, for easy chaining.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L958-L986
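with_params() is chained onto a pipeline instance before it is scheduled, either on a child inside a generator's run() or on a root pipeline before start(); only the four attribute names listed above are accepted. A sketch with illustrative values; the 'worker-backend' target is a made-up module name.

from pipeline import pipeline

class WorkerPipeline(pipeline.Pipeline):

    def run(self, item):
        return item * 2

class ParentPipeline(pipeline.Pipeline):

    def run(self, items):
        for item in items:
            # Route each child to a dedicated module and cap its retries.
            yield WorkerPipeline(item).with_params(
                target='worker-backend', max_attempts=5, backoff_seconds=30)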
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._set_class_path
def _set_class_path(cls, module_dict=sys.modules): """Sets the absolute path to this class as a string. Used by the Pipeline API to reconstruct the Pipeline sub-class object at execution time instead of passing around a serialized function. Args: module_dict: Used for testing. """ # Do not traverse the class hierarchy fetching the class path attribute. found = cls.__dict__.get('_class_path') if found is not None: return # Do not set the _class_path for the base-class, otherwise all children's # lookups for _class_path will fall through and return 'Pipeline' above. # This situation can happen if users call the generic Pipeline.from_id # to get the result of a Pipeline without knowing its specific class. if cls is Pipeline: return class_path = '%s.%s' % (cls.__module__, cls.__name__) # When a WSGI handler is invoked as an entry point, any Pipeline class # defined in the same file as the handler will get __module__ set to # __main__. Thus we need to find out its real fully qualified path. if cls.__module__ == '__main__': for name, module in module_dict.items(): if name == '__main__': continue found = getattr(module, cls.__name__, None) if found is cls: class_path = '%s.%s' % (name, cls.__name__) break cls._class_path = class_path
python
def _set_class_path(cls, module_dict=sys.modules): """Sets the absolute path to this class as a string. Used by the Pipeline API to reconstruct the Pipeline sub-class object at execution time instead of passing around a serialized function. Args: module_dict: Used for testing. """ # Do not traverse the class hierarchy fetching the class path attribute. found = cls.__dict__.get('_class_path') if found is not None: return # Do not set the _class_path for the base-class, otherwise all children's # lookups for _class_path will fall through and return 'Pipeline' above. # This situation can happen if users call the generic Pipeline.from_id # to get the result of a Pipeline without knowing its specific class. if cls is Pipeline: return class_path = '%s.%s' % (cls.__module__, cls.__name__) # When a WSGI handler is invoked as an entry point, any Pipeline class # defined in the same file as the handler will get __module__ set to # __main__. Thus we need to find out its real fully qualified path. if cls.__module__ == '__main__': for name, module in module_dict.items(): if name == '__main__': continue found = getattr(module, cls.__name__, None) if found is cls: class_path = '%s.%s' % (name, cls.__name__) break cls._class_path = class_path
Sets the absolute path to this class as a string. Used by the Pipeline API to reconstruct the Pipeline sub-class object at execution time instead of passing around a serialized function. Args: module_dict: Used for testing.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1035-L1068
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._set_values_internal
def _set_values_internal(self, context, pipeline_key, root_pipeline_key, outputs, result_status): """Sets the user-visible values provided as an API by this class. Args: context: The _PipelineContext used for this Pipeline. pipeline_key: The db.Key of this pipeline. root_pipeline_key: The db.Key of the root pipeline. outputs: The PipelineFuture for this pipeline. result_status: The result status of this pipeline. """ self._context = context self._pipeline_key = pipeline_key self._root_pipeline_key = root_pipeline_key self._result_status = result_status self.outputs = outputs
python
def _set_values_internal(self, context, pipeline_key, root_pipeline_key, outputs, result_status): """Sets the user-visible values provided as an API by this class. Args: context: The _PipelineContext used for this Pipeline. pipeline_key: The db.Key of this pipeline. root_pipeline_key: The db.Key of the root pipeline. outputs: The PipelineFuture for this pipeline. result_status: The result status of this pipeline. """ self._context = context self._pipeline_key = pipeline_key self._root_pipeline_key = root_pipeline_key self._result_status = result_status self.outputs = outputs
Sets the user-visible values provided as an API by this class. Args: context: The _PipelineContext used for this Pipeline. pipeline_key: The db.Key of this pipeline. root_pipeline_key: The db.Key of the root pipeline. outputs: The PipelineFuture for this pipeline. result_status: The result status of this pipeline.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1070-L1089
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._callback_internal
def _callback_internal(self, kwargs): """Used to execute callbacks on asynchronous pipelines.""" logging.debug('Callback %s(*%s, **%s)#%s with params: %r', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name(), kwargs) return self.callback(**kwargs)
python
def _callback_internal(self, kwargs): """Used to execute callbacks on asynchronous pipelines.""" logging.debug('Callback %s(*%s, **%s)#%s with params: %r', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name(), kwargs) return self.callback(**kwargs)
Used to execute callbacks on asynchronous pipelines.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1091-L1096
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._run_internal
def _run_internal(self, context, pipeline_key, root_pipeline_key, caller_output): """Used by the Pipeline evaluator to execute this Pipeline.""" self._set_values_internal( context, pipeline_key, root_pipeline_key, caller_output, _PipelineRecord.RUN) logging.debug('Running %s(*%s, **%s)#%s', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name()) return self.run(*self.args, **self.kwargs)
python
def _run_internal(self, context, pipeline_key, root_pipeline_key, caller_output): """Used by the Pipeline evaluator to execute this Pipeline.""" self._set_values_internal( context, pipeline_key, root_pipeline_key, caller_output, _PipelineRecord.RUN) logging.debug('Running %s(*%s, **%s)#%s', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name()) return self.run(*self.args, **self.kwargs)
Used by the Pipeline evaluator to execute this Pipeline.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1098-L1110
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
Pipeline._finalized_internal
def _finalized_internal(self, context, pipeline_key, root_pipeline_key, caller_output, aborted): """Used by the Pipeline evaluator to finalize this Pipeline.""" result_status = _PipelineRecord.RUN if aborted: result_status = _PipelineRecord.ABORTED self._set_values_internal( context, pipeline_key, root_pipeline_key, caller_output, result_status) logging.debug('Finalizing %s(*%r, **%r)#%s', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name()) try: self.finalized() except NotImplementedError: pass
python
def _finalized_internal(self, context, pipeline_key, root_pipeline_key, caller_output, aborted): """Used by the Pipeline evaluator to finalize this Pipeline.""" result_status = _PipelineRecord.RUN if aborted: result_status = _PipelineRecord.ABORTED self._set_values_internal( context, pipeline_key, root_pipeline_key, caller_output, result_status) logging.debug('Finalizing %s(*%r, **%r)#%s', self._class_path, _short_repr(self.args), _short_repr(self.kwargs), self._pipeline_key.name()) try: self.finalized() except NotImplementedError: pass
Used by the Pipeline evaluator to finalize this Pipeline.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1112-L1131
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
InOrder._add_future
def _add_future(cls, future): """Adds a future to the list of in-order futures thus far. Args: future: The future to add to the list. """ if cls._local._activated: cls._local._in_order_futures.add(future)
python
def _add_future(cls, future): """Adds a future to the list of in-order futures thus far. Args: future: The future to add to the list. """ if cls._local._activated: cls._local._in_order_futures.add(future)
Adds a future to the list of in-order futures thus far. Args: future: The future to add to the list.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1190-L1197
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
InOrder._thread_init
def _thread_init(cls): """Ensure thread local is initialized.""" if not hasattr(cls._local, '_in_order_futures'): cls._local._in_order_futures = set() cls._local._activated = False
python
def _thread_init(cls): """Ensure thread local is initialized.""" if not hasattr(cls._local, '_in_order_futures'): cls._local._in_order_futures = set() cls._local._activated = False
Ensure thread local is initialized.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1224-L1228
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.from_environ
def from_environ(cls, environ=os.environ): """Constructs a _PipelineContext from the task queue environment.""" base_path, unused = (environ['PATH_INFO'].rsplit('/', 1) + [''])[:2] return cls( environ['HTTP_X_APPENGINE_TASKNAME'], environ['HTTP_X_APPENGINE_QUEUENAME'], base_path)
python
def from_environ(cls, environ=os.environ): """Constructs a _PipelineContext from the task queue environment.""" base_path, unused = (environ['PATH_INFO'].rsplit('/', 1) + [''])[:2] return cls( environ['HTTP_X_APPENGINE_TASKNAME'], environ['HTTP_X_APPENGINE_QUEUENAME'], base_path)
Constructs a _PipelineContext from the task queue environment.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1452-L1458
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.fill_slot
def fill_slot(self, filler_pipeline_key, slot, value): """Fills a slot, enqueueing a task to trigger pending barriers. Args: filler_pipeline_key: db.Key or stringified key of the _PipelineRecord that filled this slot. slot: The Slot instance to fill. value: The serializable value to assign. Raises: UnexpectedPipelineError if the _SlotRecord for the 'slot' could not be found in the Datastore. """ if not isinstance(filler_pipeline_key, db.Key): filler_pipeline_key = db.Key(filler_pipeline_key) if _TEST_MODE: slot._set_value_test(filler_pipeline_key, value) else: encoded_value = json.dumps(value, sort_keys=True, cls=mr_util.JsonEncoder) value_text = None value_blob = None if len(encoded_value) <= _MAX_JSON_SIZE: value_text = db.Text(encoded_value) else: # The encoded value is too big. Save it as a blob. value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name()) def txn(): slot_record = db.get(slot.key) if slot_record is None: raise UnexpectedPipelineError( 'Tried to fill missing slot "%s" ' 'by pipeline ID "%s" with value: %r' % (slot.key, filler_pipeline_key.name(), value)) # NOTE: Always take the override value here. If down-stream pipelines # need a consitent view of all up-stream outputs (meaning, all of the # outputs came from the same retry attempt of the upstream pipeline), # the down-stream pipeline must also wait for the 'default' output # of these up-stream pipelines. slot_record.filler = filler_pipeline_key slot_record.value_text = value_text slot_record.value_blob = value_blob slot_record.status = _SlotRecord.FILLED slot_record.fill_time = self._gettime() slot_record.put() task = taskqueue.Task( url=self.barrier_handler_path, params=dict( slot_key=slot.key, use_barrier_indexes=True), headers={'X-Ae-Slot-Key': slot.key, 'X-Ae-Filler-Pipeline-Key': filler_pipeline_key}) task.add(queue_name=self.queue_name, transactional=True) db.run_in_transaction_options( db.create_transaction_options(propagation=db.ALLOWED), txn) self.session_filled_output_names.add(slot.name)
python
def fill_slot(self, filler_pipeline_key, slot, value):
  """Fills a slot, enqueueing a task to trigger pending barriers.

  Args:
    filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
      that filled this slot.
    slot: The Slot instance to fill.
    value: The serializable value to assign.

  Raises:
    UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
    be found in the Datastore.
  """
  if not isinstance(filler_pipeline_key, db.Key):
    filler_pipeline_key = db.Key(filler_pipeline_key)

  if _TEST_MODE:
    slot._set_value_test(filler_pipeline_key, value)
  else:
    encoded_value = json.dumps(value, sort_keys=True, cls=mr_util.JsonEncoder)
    value_text = None
    value_blob = None
    if len(encoded_value) <= _MAX_JSON_SIZE:
      value_text = db.Text(encoded_value)
    else:
      # The encoded value is too big. Save it as a blob.
      value_blob = _write_json_blob(encoded_value, filler_pipeline_key.name())

    def txn():
      slot_record = db.get(slot.key)
      if slot_record is None:
        raise UnexpectedPipelineError(
            'Tried to fill missing slot "%s" '
            'by pipeline ID "%s" with value: %r'
            % (slot.key, filler_pipeline_key.name(), value))
      # NOTE: Always take the override value here. If down-stream pipelines
      # need a consistent view of all up-stream outputs (meaning, all of the
      # outputs came from the same retry attempt of the upstream pipeline),
      # the down-stream pipeline must also wait for the 'default' output
      # of these up-stream pipelines.
      slot_record.filler = filler_pipeline_key
      slot_record.value_text = value_text
      slot_record.value_blob = value_blob
      slot_record.status = _SlotRecord.FILLED
      slot_record.fill_time = self._gettime()
      slot_record.put()
      task = taskqueue.Task(
          url=self.barrier_handler_path,
          params=dict(
              slot_key=slot.key,
              use_barrier_indexes=True),
          headers={'X-Ae-Slot-Key': slot.key,
                   'X-Ae-Filler-Pipeline-Key': filler_pipeline_key})
      task.add(queue_name=self.queue_name, transactional=True)

    db.run_in_transaction_options(
        db.create_transaction_options(propagation=db.ALLOWED), txn)

  self.session_filled_output_names.add(slot.name)
Fills a slot, enqueueing a task to trigger pending barriers.

Args:
  filler_pipeline_key: db.Key or stringified key of the _PipelineRecord
    that filled this slot.
  slot: The Slot instance to fill.
  value: The serializable value to assign.

Raises:
  UnexpectedPipelineError if the _SlotRecord for the 'slot' could not
  be found in the Datastore.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1460-L1519
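A hedged sketch of the text-versus-blob decision made inside fill_slot; the _MAX_JSON_SIZE value and the storage labels here are stand-ins for the module's own constant and its _write_json_blob helper:

import json

# Stand-in for the module-level constant fill_slot() compares against.
_MAX_JSON_SIZE = 900000


def choose_slot_storage(value):
  """Mirror fill_slot()'s decision between inline text and a blob."""
  encoded_value = json.dumps(value, sort_keys=True)
  if len(encoded_value) <= _MAX_JSON_SIZE:
    return 'text', encoded_value  # small enough to store inline as db.Text
  return 'blob', encoded_value    # too big; fill_slot() hands it to _write_json_blob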
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.notify_barriers
def notify_barriers(self, slot_key, cursor, use_barrier_indexes, max_to_notify=_MAX_BARRIERS_TO_NOTIFY): """Searches for barriers affected by a slot and triggers completed ones. Args: slot_key: db.Key or stringified key of the _SlotRecord that was filled. cursor: Stringified Datastore cursor where the notification query should pick up. use_barrier_indexes: When True, use _BarrierIndex records to determine which _Barriers to trigger by having this _SlotRecord filled. When False, use the old method that queries for _BarrierRecords by the blocking_slots parameter. max_to_notify: Used for testing. Raises: PipelineStatusError: If any of the barriers are in a bad state. """ if not isinstance(slot_key, db.Key): slot_key = db.Key(slot_key) logging.debug('Notifying slot %r', slot_key) if use_barrier_indexes: # Please see models.py:_BarrierIndex to understand how _BarrierIndex # entities relate to _BarrierRecord entities. query = ( _BarrierIndex.all(cursor=cursor, keys_only=True) .ancestor(slot_key)) barrier_index_list = query.fetch(max_to_notify) barrier_key_list = [ _BarrierIndex.to_barrier_key(key) for key in barrier_index_list] # If there are task and pipeline kickoff retries it's possible for a # _BarrierIndex to exist for a _BarrierRecord that was not successfully # written. It's safe to ignore this because the original task that wrote # the _BarrierIndex and _BarrierRecord would not have made progress to # kick off a real pipeline or child pipeline unless all of the writes for # these dependent entities went through. We assume that the instigator # retried from scratch and somehwere there exists a good _BarrierIndex and # corresponding _BarrierRecord that tries to accomplish the same thing. barriers = db.get(barrier_key_list) results = [] for barrier_key, barrier in zip(barrier_key_list, barriers): if barrier is None: logging.debug('Ignoring that Barrier "%r" is missing, ' 'relies on Slot "%r"', barrier_key, slot_key) else: results.append(barrier) else: # TODO(user): Delete this backwards compatible codepath and # make use_barrier_indexes the assumed default in all cases. query = ( _BarrierRecord.all(cursor=cursor) .filter('blocking_slots =', slot_key)) results = query.fetch(max_to_notify) # Fetch all blocking _SlotRecords for any potentially triggered barriers. blocking_slot_keys = [] for barrier in results: blocking_slot_keys.extend(barrier.blocking_slots) blocking_slot_dict = {} for slot_record in db.get(blocking_slot_keys): if slot_record is None: continue blocking_slot_dict[slot_record.key()] = slot_record task_list = [] updated_barriers = [] for barrier in results: ready_slots = [] for blocking_slot_key in barrier.blocking_slots: slot_record = blocking_slot_dict.get(blocking_slot_key) if slot_record is None: raise UnexpectedPipelineError( 'Barrier "%r" relies on Slot "%r" which is missing.' % (barrier.key(), blocking_slot_key)) if slot_record.status == _SlotRecord.FILLED: ready_slots.append(blocking_slot_key) # When all of the blocking_slots have been filled, consider the barrier # ready to trigger. We'll trigger it regardless of the current # _BarrierRecord status, since there could be task queue failures at any # point in this flow; this rolls forward the state and de-dupes using # the task name tombstones. 
pending_slots = set(barrier.blocking_slots) - set(ready_slots) if not pending_slots: if barrier.status != _BarrierRecord.FIRED: barrier.status = _BarrierRecord.FIRED barrier.trigger_time = self._gettime() updated_barriers.append(barrier) purpose = barrier.key().name() if purpose == _BarrierRecord.START: path = self.pipeline_handler_path countdown = None else: path = self.finalized_handler_path # NOTE: Wait one second before finalization to prevent # contention on the _PipelineRecord entity. countdown = 1 pipeline_key = _BarrierRecord.target.get_value_for_datastore(barrier) pipeline_record = db.get(pipeline_key) logging.debug('Firing barrier %r', barrier.key()) task_list.append(taskqueue.Task( url=path, countdown=countdown, name='ae-barrier-fire-%s-%s' % (pipeline_key.name(), purpose), params=dict(pipeline_key=pipeline_key, purpose=purpose), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target'])) else: logging.debug('Not firing barrier %r, Waiting for slots: %r', barrier.key(), pending_slots) # Blindly overwrite _BarrierRecords that have an updated status. This is # acceptable because by this point all finalization barriers for # generator children should have already had their final outputs assigned. if updated_barriers: db.put(updated_barriers) # Task continuation with sequence number to prevent fork-bombs. if len(results) == max_to_notify: the_match = re.match('(.*)-ae-barrier-notify-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-ae-barrier-notify-%d' % (prefix, end), url=self.barrier_handler_path, params=dict( slot_key=slot_key, cursor=query.cursor(), use_barrier_indexes=use_barrier_indexes))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
python
def notify_barriers(self, slot_key, cursor, use_barrier_indexes, max_to_notify=_MAX_BARRIERS_TO_NOTIFY): """Searches for barriers affected by a slot and triggers completed ones. Args: slot_key: db.Key or stringified key of the _SlotRecord that was filled. cursor: Stringified Datastore cursor where the notification query should pick up. use_barrier_indexes: When True, use _BarrierIndex records to determine which _Barriers to trigger by having this _SlotRecord filled. When False, use the old method that queries for _BarrierRecords by the blocking_slots parameter. max_to_notify: Used for testing. Raises: PipelineStatusError: If any of the barriers are in a bad state. """ if not isinstance(slot_key, db.Key): slot_key = db.Key(slot_key) logging.debug('Notifying slot %r', slot_key) if use_barrier_indexes: # Please see models.py:_BarrierIndex to understand how _BarrierIndex # entities relate to _BarrierRecord entities. query = ( _BarrierIndex.all(cursor=cursor, keys_only=True) .ancestor(slot_key)) barrier_index_list = query.fetch(max_to_notify) barrier_key_list = [ _BarrierIndex.to_barrier_key(key) for key in barrier_index_list] # If there are task and pipeline kickoff retries it's possible for a # _BarrierIndex to exist for a _BarrierRecord that was not successfully # written. It's safe to ignore this because the original task that wrote # the _BarrierIndex and _BarrierRecord would not have made progress to # kick off a real pipeline or child pipeline unless all of the writes for # these dependent entities went through. We assume that the instigator # retried from scratch and somehwere there exists a good _BarrierIndex and # corresponding _BarrierRecord that tries to accomplish the same thing. barriers = db.get(barrier_key_list) results = [] for barrier_key, barrier in zip(barrier_key_list, barriers): if barrier is None: logging.debug('Ignoring that Barrier "%r" is missing, ' 'relies on Slot "%r"', barrier_key, slot_key) else: results.append(barrier) else: # TODO(user): Delete this backwards compatible codepath and # make use_barrier_indexes the assumed default in all cases. query = ( _BarrierRecord.all(cursor=cursor) .filter('blocking_slots =', slot_key)) results = query.fetch(max_to_notify) # Fetch all blocking _SlotRecords for any potentially triggered barriers. blocking_slot_keys = [] for barrier in results: blocking_slot_keys.extend(barrier.blocking_slots) blocking_slot_dict = {} for slot_record in db.get(blocking_slot_keys): if slot_record is None: continue blocking_slot_dict[slot_record.key()] = slot_record task_list = [] updated_barriers = [] for barrier in results: ready_slots = [] for blocking_slot_key in barrier.blocking_slots: slot_record = blocking_slot_dict.get(blocking_slot_key) if slot_record is None: raise UnexpectedPipelineError( 'Barrier "%r" relies on Slot "%r" which is missing.' % (barrier.key(), blocking_slot_key)) if slot_record.status == _SlotRecord.FILLED: ready_slots.append(blocking_slot_key) # When all of the blocking_slots have been filled, consider the barrier # ready to trigger. We'll trigger it regardless of the current # _BarrierRecord status, since there could be task queue failures at any # point in this flow; this rolls forward the state and de-dupes using # the task name tombstones. 
pending_slots = set(barrier.blocking_slots) - set(ready_slots) if not pending_slots: if barrier.status != _BarrierRecord.FIRED: barrier.status = _BarrierRecord.FIRED barrier.trigger_time = self._gettime() updated_barriers.append(barrier) purpose = barrier.key().name() if purpose == _BarrierRecord.START: path = self.pipeline_handler_path countdown = None else: path = self.finalized_handler_path # NOTE: Wait one second before finalization to prevent # contention on the _PipelineRecord entity. countdown = 1 pipeline_key = _BarrierRecord.target.get_value_for_datastore(barrier) pipeline_record = db.get(pipeline_key) logging.debug('Firing barrier %r', barrier.key()) task_list.append(taskqueue.Task( url=path, countdown=countdown, name='ae-barrier-fire-%s-%s' % (pipeline_key.name(), purpose), params=dict(pipeline_key=pipeline_key, purpose=purpose), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target'])) else: logging.debug('Not firing barrier %r, Waiting for slots: %r', barrier.key(), pending_slots) # Blindly overwrite _BarrierRecords that have an updated status. This is # acceptable because by this point all finalization barriers for # generator children should have already had their final outputs assigned. if updated_barriers: db.put(updated_barriers) # Task continuation with sequence number to prevent fork-bombs. if len(results) == max_to_notify: the_match = re.match('(.*)-ae-barrier-notify-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-ae-barrier-notify-%d' % (prefix, end), url=self.barrier_handler_path, params=dict( slot_key=slot_key, cursor=query.cursor(), use_barrier_indexes=use_barrier_indexes))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
Searches for barriers affected by a slot and triggers completed ones.

Args:
  slot_key: db.Key or stringified key of the _SlotRecord that was filled.
  cursor: Stringified Datastore cursor where the notification query
    should pick up.
  use_barrier_indexes: When True, use _BarrierIndex records to determine
    which _Barriers to trigger by having this _SlotRecord filled. When False,
    use the old method that queries for _BarrierRecords by the
    blocking_slots parameter.
  max_to_notify: Used for testing.

Raises:
  PipelineStatusError: If any of the barriers are in a bad state.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1521-L1665
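The continuation task naming at the end of notify_barriers is what keeps retries from fork-bombing the queue: re-running the same continuation produces the same task name, so the task queue's name tombstones de-duplicate it. A standalone sketch of that naming scheme (task names are hypothetical):

import re


def next_notify_task_name(task_name):
  """Compute the sequence-numbered continuation name used above."""
  the_match = re.match('(.*)-ae-barrier-notify-([0-9]+)', task_name)
  if the_match:
    prefix = the_match.group(1)
    end = int(the_match.group(2)) + 1
  else:
    prefix = task_name
    end = 0
  return '%s-ae-barrier-notify-%d' % (prefix, end)


assert next_notify_task_name('worker-7') == 'worker-7-ae-barrier-notify-0'
assert (next_notify_task_name('worker-7-ae-barrier-notify-0') ==
        'worker-7-ae-barrier-notify-1')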
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.begin_abort
def begin_abort(self, root_pipeline_key, abort_message):
  """Kicks off the abort process for a root pipeline and all its children.

  Args:
    root_pipeline_key: db.Key of the root pipeline to abort.
    abort_message: Message explaining why the abort happened, only saved
      into the root pipeline.

  Returns:
    True if the abort signal was sent successfully; False otherwise.
  """
  def txn():
    pipeline_record = db.get(root_pipeline_key)
    if pipeline_record is None:
      logging.warning(
          'Tried to abort root pipeline ID "%s" but it does not exist.',
          root_pipeline_key.name())
      raise db.Rollback()
    if pipeline_record.status == _PipelineRecord.ABORTED:
      logging.warning(
          'Tried to abort root pipeline ID "%s"; already in state: %s',
          root_pipeline_key.name(), pipeline_record.status)
      raise db.Rollback()
    if pipeline_record.abort_requested:
      logging.warning(
          'Tried to abort root pipeline ID "%s"; abort signal already sent.',
          root_pipeline_key.name())
      raise db.Rollback()

    pipeline_record.abort_requested = True
    pipeline_record.abort_message = abort_message
    pipeline_record.put()

    task = taskqueue.Task(
        url=self.fanout_abort_handler_path,
        params=dict(root_pipeline_key=root_pipeline_key))
    task.add(queue_name=self.queue_name, transactional=True)
    return True

  return db.run_in_transaction(txn)
python
def begin_abort(self, root_pipeline_key, abort_message):
  """Kicks off the abort process for a root pipeline and all its children.

  Args:
    root_pipeline_key: db.Key of the root pipeline to abort.
    abort_message: Message explaining why the abort happened, only saved
      into the root pipeline.

  Returns:
    True if the abort signal was sent successfully; False otherwise.
  """
  def txn():
    pipeline_record = db.get(root_pipeline_key)
    if pipeline_record is None:
      logging.warning(
          'Tried to abort root pipeline ID "%s" but it does not exist.',
          root_pipeline_key.name())
      raise db.Rollback()
    if pipeline_record.status == _PipelineRecord.ABORTED:
      logging.warning(
          'Tried to abort root pipeline ID "%s"; already in state: %s',
          root_pipeline_key.name(), pipeline_record.status)
      raise db.Rollback()
    if pipeline_record.abort_requested:
      logging.warning(
          'Tried to abort root pipeline ID "%s"; abort signal already sent.',
          root_pipeline_key.name())
      raise db.Rollback()

    pipeline_record.abort_requested = True
    pipeline_record.abort_message = abort_message
    pipeline_record.put()

    task = taskqueue.Task(
        url=self.fanout_abort_handler_path,
        params=dict(root_pipeline_key=root_pipeline_key))
    task.add(queue_name=self.queue_name, transactional=True)
    return True

  return db.run_in_transaction(txn)
Kicks off the abort process for a root pipeline and all its children.

Args:
  root_pipeline_key: db.Key of the root pipeline to abort.
  abort_message: Message explaining why the abort happened, only saved
    into the root pipeline.

Returns:
  True if the abort signal was sent successfully; False otherwise.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1667-L1706
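begin_abort reports success through the transaction's return value. A minimal sketch of the db.Rollback behavior it relies on, assuming the classic google.appengine.ext.db API shown in the record:

from google.appengine.ext import db


def _already_handled_txn():
  # Raising db.Rollback() aborts the transaction without propagating an
  # exception to the caller, so db.run_in_transaction() returns None here.
  raise db.Rollback()

# begin_abort() leans on exactly this: the rolled-back paths yield a falsy
# result, while the committed path returns True.
# sent = db.run_in_transaction(_already_handled_txn)  # -> None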
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.continue_abort
def continue_abort(self, root_pipeline_key, cursor=None, max_to_notify=_MAX_ABORTS_TO_BEGIN): """Sends the abort signal to all children for a root pipeline. Args: root_pipeline_key: db.Key of the root pipeline to abort. cursor: The query cursor for enumerating _PipelineRecords when inserting tasks to cause child pipelines to terminate. max_to_notify: Used for testing. """ if not isinstance(root_pipeline_key, db.Key): root_pipeline_key = db.Key(root_pipeline_key) # NOTE: The results of this query may include _PipelineRecord instances # that are not actually "reachable", meaning you cannot get to them by # starting at the root pipeline and following "fanned_out" onward. This # is acceptable because even these defunct _PipelineRecords will properly # set their status to ABORTED when the signal comes, regardless of any # other status they may have had. # # The only gotcha here is if a Pipeline's finalize method somehow modifies # its inputs (like deleting an input file). In the case there are # unreachable child pipelines, it will appear as if two finalize methods # have been called instead of just one. The saving grace here is that # finalize must be idempotent, so this *should* be harmless. query = ( _PipelineRecord.all(cursor=cursor) .filter('root_pipeline =', root_pipeline_key)) results = query.fetch(max_to_notify) task_list = [] for pipeline_record in results: if pipeline_record.status not in ( _PipelineRecord.RUN, _PipelineRecord.WAITING): continue pipeline_key = pipeline_record.key() task_list.append(taskqueue.Task( name='%s-%s-abort' % (self.task_name, pipeline_key.name()), url=self.abort_handler_path, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT), headers={'X-Ae-Pipeline-Key': pipeline_key})) # Task continuation with sequence number to prevent fork-bombs. if len(results) == max_to_notify: the_match = re.match('(.*)-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-%d' % (prefix, end), url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key, cursor=query.cursor()))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
python
def continue_abort(self, root_pipeline_key, cursor=None, max_to_notify=_MAX_ABORTS_TO_BEGIN): """Sends the abort signal to all children for a root pipeline. Args: root_pipeline_key: db.Key of the root pipeline to abort. cursor: The query cursor for enumerating _PipelineRecords when inserting tasks to cause child pipelines to terminate. max_to_notify: Used for testing. """ if not isinstance(root_pipeline_key, db.Key): root_pipeline_key = db.Key(root_pipeline_key) # NOTE: The results of this query may include _PipelineRecord instances # that are not actually "reachable", meaning you cannot get to them by # starting at the root pipeline and following "fanned_out" onward. This # is acceptable because even these defunct _PipelineRecords will properly # set their status to ABORTED when the signal comes, regardless of any # other status they may have had. # # The only gotcha here is if a Pipeline's finalize method somehow modifies # its inputs (like deleting an input file). In the case there are # unreachable child pipelines, it will appear as if two finalize methods # have been called instead of just one. The saving grace here is that # finalize must be idempotent, so this *should* be harmless. query = ( _PipelineRecord.all(cursor=cursor) .filter('root_pipeline =', root_pipeline_key)) results = query.fetch(max_to_notify) task_list = [] for pipeline_record in results: if pipeline_record.status not in ( _PipelineRecord.RUN, _PipelineRecord.WAITING): continue pipeline_key = pipeline_record.key() task_list.append(taskqueue.Task( name='%s-%s-abort' % (self.task_name, pipeline_key.name()), url=self.abort_handler_path, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.ABORT), headers={'X-Ae-Pipeline-Key': pipeline_key})) # Task continuation with sequence number to prevent fork-bombs. if len(results) == max_to_notify: the_match = re.match('(.*)-([0-9]+)', self.task_name) if the_match: prefix = the_match.group(1) end = int(the_match.group(2)) + 1 else: prefix = self.task_name end = 0 task_list.append(taskqueue.Task( name='%s-%d' % (prefix, end), url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key, cursor=query.cursor()))) if task_list: try: taskqueue.Queue(self.queue_name).add(task_list) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError): pass
Sends the abort signal to all children for a root pipeline.

Args:
  root_pipeline_key: db.Key of the root pipeline to abort.
  cursor: The query cursor for enumerating _PipelineRecords when inserting
    tasks to cause child pipelines to terminate.
  max_to_notify: Used for testing.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1708-L1771
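A small illustration of the per-child abort task naming used above; the names and ids are hypothetical, the point is that a retried fan-out task regenerates identical names so the task queue's tombstones drop the duplicates:

def abort_task_name(fanout_task_name, child_pipeline_id):
  # Same '%s-%s-abort' scheme as continue_abort() uses for each child task.
  return '%s-%s-abort' % (fanout_task_name, child_pipeline_id)


assert (abort_task_name('abort-fanout-3', 'a1b2c3d4') ==
        'abort-fanout-3-a1b2c3d4-abort')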
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.start
def start(self, pipeline, return_task=True, countdown=None, eta=None): """Starts a pipeline. Args: pipeline: Pipeline instance to run. return_task: When True, do not submit the task to start the pipeline but instead return it for someone else to enqueue. countdown: Time in seconds into the future that this Task should execute. Defaults to zero. eta: A datetime.datetime specifying the absolute time at which the task should be executed. Must not be specified if 'countdown' is specified. This may be timezone-aware or timezone-naive. If None, defaults to now. For pull tasks, no worker will be able to lease this task before the time indicated by eta. Returns: The task to start this pipeline if return_task was True. Raises: PipelineExistsError if the pipeline with the given ID already exists. """ # Adjust all pipeline output keys for this Pipeline to be children of # the _PipelineRecord, that way we can write them all and submit in a # single transaction. for name, slot in pipeline.outputs._output_dict.iteritems(): slot.key = db.Key.from_path( *slot.key.to_path(), **dict(parent=pipeline._pipeline_key)) _, output_slots, params_text, params_blob = _generate_args( pipeline, pipeline.outputs, self.queue_name, self.base_path) @db.transactional(propagation=db.INDEPENDENT) def txn(): pipeline_record = db.get(pipeline._pipeline_key) if pipeline_record is not None: raise PipelineExistsError( 'Pipeline with idempotence key "%s" already exists; params=%s' % (pipeline._pipeline_key.name(), _short_repr(pipeline_record.params))) entities_to_put = [] for name, slot in pipeline.outputs._output_dict.iteritems(): entities_to_put.append(_SlotRecord( key=slot.key, root_pipeline=pipeline._pipeline_key)) entities_to_put.append(_PipelineRecord( key=pipeline._pipeline_key, root_pipeline=pipeline._pipeline_key, is_root_pipeline=True, # Bug in DB means we need to use the storage name here, # not the local property name. params=params_text, params_blob=params_blob, start_time=self._gettime(), class_path=pipeline._class_path, max_attempts=pipeline.max_attempts)) entities_to_put.extend(_PipelineContext._create_barrier_entities( pipeline._pipeline_key, pipeline._pipeline_key, _BarrierRecord.FINALIZE, output_slots)) db.put(entities_to_put) task = taskqueue.Task( url=self.pipeline_handler_path, params=dict(pipeline_key=pipeline._pipeline_key), headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key}, target=pipeline.target, countdown=countdown, eta=eta) if return_task: return task task.add(queue_name=self.queue_name, transactional=True) task = txn() # Immediately mark the output slots as existing so they can be filled # by asynchronous pipelines or used in test mode. for output_slot in pipeline.outputs._output_dict.itervalues(): output_slot._exists = True return task
python
def start(self, pipeline, return_task=True, countdown=None, eta=None): """Starts a pipeline. Args: pipeline: Pipeline instance to run. return_task: When True, do not submit the task to start the pipeline but instead return it for someone else to enqueue. countdown: Time in seconds into the future that this Task should execute. Defaults to zero. eta: A datetime.datetime specifying the absolute time at which the task should be executed. Must not be specified if 'countdown' is specified. This may be timezone-aware or timezone-naive. If None, defaults to now. For pull tasks, no worker will be able to lease this task before the time indicated by eta. Returns: The task to start this pipeline if return_task was True. Raises: PipelineExistsError if the pipeline with the given ID already exists. """ # Adjust all pipeline output keys for this Pipeline to be children of # the _PipelineRecord, that way we can write them all and submit in a # single transaction. for name, slot in pipeline.outputs._output_dict.iteritems(): slot.key = db.Key.from_path( *slot.key.to_path(), **dict(parent=pipeline._pipeline_key)) _, output_slots, params_text, params_blob = _generate_args( pipeline, pipeline.outputs, self.queue_name, self.base_path) @db.transactional(propagation=db.INDEPENDENT) def txn(): pipeline_record = db.get(pipeline._pipeline_key) if pipeline_record is not None: raise PipelineExistsError( 'Pipeline with idempotence key "%s" already exists; params=%s' % (pipeline._pipeline_key.name(), _short_repr(pipeline_record.params))) entities_to_put = [] for name, slot in pipeline.outputs._output_dict.iteritems(): entities_to_put.append(_SlotRecord( key=slot.key, root_pipeline=pipeline._pipeline_key)) entities_to_put.append(_PipelineRecord( key=pipeline._pipeline_key, root_pipeline=pipeline._pipeline_key, is_root_pipeline=True, # Bug in DB means we need to use the storage name here, # not the local property name. params=params_text, params_blob=params_blob, start_time=self._gettime(), class_path=pipeline._class_path, max_attempts=pipeline.max_attempts)) entities_to_put.extend(_PipelineContext._create_barrier_entities( pipeline._pipeline_key, pipeline._pipeline_key, _BarrierRecord.FINALIZE, output_slots)) db.put(entities_to_put) task = taskqueue.Task( url=self.pipeline_handler_path, params=dict(pipeline_key=pipeline._pipeline_key), headers={'X-Ae-Pipeline-Key': pipeline._pipeline_key}, target=pipeline.target, countdown=countdown, eta=eta) if return_task: return task task.add(queue_name=self.queue_name, transactional=True) task = txn() # Immediately mark the output slots as existing so they can be filled # by asynchronous pipelines or used in test mode. for output_slot in pipeline.outputs._output_dict.itervalues(): output_slot._exists = True return task
Starts a pipeline.

Args:
  pipeline: Pipeline instance to run.
  return_task: When True, do not submit the task to start the pipeline
    but instead return it for someone else to enqueue.
  countdown: Time in seconds into the future that this Task should execute.
    Defaults to zero.
  eta: A datetime.datetime specifying the absolute time at which the task
    should be executed. Must not be specified if 'countdown' is specified.
    This may be timezone-aware or timezone-naive. If None, defaults to now.
    For pull tasks, no worker will be able to lease this task before the
    time indicated by eta.

Returns:
  The task to start this pipeline if return_task was True.

Raises:
  PipelineExistsError if the pipeline with the given ID already exists.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1773-L1855
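A hedged sketch of the caller-side flow that ultimately reaches this method; EchoPipeline is an invented example class, and the commented calls assume the package's public Pipeline.start() entry point:

import pipeline


class EchoPipeline(pipeline.Pipeline):
  """Any user-defined Pipeline subclass; trivial body for illustration."""

  def run(self, message):
    return message


# Typical caller-side flow:
# stage = EchoPipeline('hello world')
# stage.start(queue_name='default')
# Re-using an idempotence key for a pipeline that already exists surfaces as
# the PipelineExistsError raised inside the transaction above.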
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.evaluate
def evaluate(self, pipeline_key, purpose=None, attempt=0): """Evaluates the given Pipeline and enqueues sub-stages for execution. Args: pipeline_key: The db.Key or stringified key of the _PipelineRecord to run. purpose: Why evaluate was called ('start', 'finalize', or 'abort'). attempt: The attempt number that should be tried. """ After._thread_init() InOrder._thread_init() InOrder._local._activated = False if not isinstance(pipeline_key, db.Key): pipeline_key = db.Key(pipeline_key) pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.error('Pipeline ID "%s" does not exist.', pipeline_key.name()) return if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): # If we're attempting to abort an already aborted pipeline, # we silently advance. #50 if (pipeline_record.status == _PipelineRecord.ABORTED and purpose == _BarrierRecord.ABORT): return logging.error('Pipeline ID "%s" in bad state for purpose "%s": "%s"', pipeline_key.name(), purpose or _BarrierRecord.START, pipeline_record.status) return params = pipeline_record.params root_pipeline_key = \ _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record) default_slot_key = db.Key(params['output_slots']['default']) default_slot_record, root_pipeline_record = db.get([ default_slot_key, root_pipeline_key]) if default_slot_record is None: logging.error('Pipeline ID "%s" default slot "%s" does not exist.', pipeline_key.name(), default_slot_key) return if root_pipeline_record is None: logging.error('Pipeline ID "%s" root pipeline ID "%s" is missing.', pipeline_key.name(), root_pipeline_key.name()) return # Always finalize if we're aborting so pipelines have a chance to cleanup # before they terminate. Pipelines must access 'was_aborted' to find # out how their finalization should work. abort_signal = ( purpose == _BarrierRecord.ABORT or root_pipeline_record.abort_requested == True) finalize_signal = ( (default_slot_record.status == _SlotRecord.FILLED and purpose == _BarrierRecord.FINALIZE) or abort_signal) try: pipeline_func_class = mr_util.for_name(pipeline_record.class_path) except ImportError, e: # This means something is wrong with the deployed code. Rely on the # taskqueue system to do retries. retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception( 'Could not locate %s#%s. %s', pipeline_record.class_path, pipeline_key.name(), retry_message) raise try: pipeline_func = pipeline_func_class.from_id( pipeline_key.name(), resolve_outputs=finalize_signal, _pipeline_record=pipeline_record) except SlotNotFilledError, e: logging.exception( 'Could not resolve arguments for %s#%s. Most likely this means there ' 'is a bug in the Pipeline runtime or some intermediate data has been ' 'deleted from the Datastore. Giving up.', pipeline_record.class_path, pipeline_key.name()) self.transition_aborted(pipeline_key) return except Exception, e: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception( 'Instantiating %s#%s raised exception. 
%s', pipeline_record.class_path, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) if pipeline_record.params['task_retry']: raise else: return else: pipeline_generator = mr_util.is_generator_function( pipeline_func_class.run) caller_output = pipeline_func.outputs if (abort_signal and pipeline_func.async and pipeline_record.status == _PipelineRecord.RUN and not pipeline_func.try_cancel()): logging.warning( 'Could not cancel and abort mid-flight async pipeline: %r#%s', pipeline_func, pipeline_key.name()) return if finalize_signal: try: pipeline_func._finalized_internal( self, pipeline_key, root_pipeline_key, caller_output, abort_signal) except Exception, e: # This means something is wrong with the deployed finalization code. # Rely on the taskqueue system to do retries. retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Finalizing %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) raise else: if not abort_signal: self.transition_complete(pipeline_key) return if abort_signal: logging.debug('Marking as aborted %s#%s', pipeline_func, pipeline_key.name()) self.transition_aborted(pipeline_key) return if pipeline_record.current_attempt != attempt: logging.error( 'Received evaluation task for pipeline ID "%s" attempt %d but ' 'current pending attempt is %d', pipeline_key.name(), attempt, pipeline_record.current_attempt) return if pipeline_record.current_attempt >= pipeline_record.max_attempts: logging.error( 'Received evaluation task for pipeline ID "%s" on attempt %d ' 'but that exceeds max attempts %d', pipeline_key.name(), attempt, pipeline_record.max_attempts) return if pipeline_record.next_retry_time is not None: retry_time = pipeline_record.next_retry_time - _RETRY_WIGGLE_TIMEDELTA if self._gettime() <= retry_time: detail_message = ( 'Received evaluation task for pipeline ID "%s" on attempt %d, ' 'which will not be ready until: %s' % (pipeline_key.name(), pipeline_record.current_attempt, pipeline_record.next_retry_time)) logging.warning(detail_message) raise UnexpectedPipelineError(detail_message) if pipeline_record.status == _PipelineRecord.RUN and pipeline_generator: if (default_slot_record.status == _SlotRecord.WAITING and not pipeline_record.fanned_out): # This properly handles the yield-less generator case when the # RUN state transition worked properly but outputting to the default # slot failed. self.fill_slot(pipeline_key, caller_output.default, None) return if (pipeline_record.status == _PipelineRecord.WAITING and pipeline_func.async): self.transition_run(pipeline_key) try: result = pipeline_func._run_internal( self, pipeline_key, root_pipeline_key, caller_output) except Exception, e: if self.handle_run_exception(pipeline_key, pipeline_func, e): raise else: return if pipeline_func.async: return if not pipeline_generator: # Catch any exceptions that are thrown when the pipeline's return # value is being serialized. This ensures that serialization errors # will cause normal abort/retry behavior. try: self.fill_slot(pipeline_key, caller_output.default, result) except Exception, e: retry_message = 'Bad return value. %s: %s' % ( e.__class__.__name__, str(e)) logging.exception( 'Generator %r#%s caused exception while serializing return ' 'value %r. 
%s', pipeline_func, pipeline_key.name(), result, retry_message) self.transition_retry(pipeline_key, retry_message) if pipeline_func.task_retry: raise else: return expected_outputs = set(caller_output._output_dict.iterkeys()) found_outputs = self.session_filled_output_names if expected_outputs != found_outputs: exception = SlotNotFilledError( 'Outputs %r for pipeline ID "%s" were never filled by "%s".' % ( expected_outputs - found_outputs, pipeline_key.name(), pipeline_func._class_path)) if self.handle_run_exception(pipeline_key, pipeline_func, exception): raise exception return pipeline_iter = result next_value = None last_sub_stage = None sub_stage = None sub_stage_dict = {} sub_stage_ordering = [] while True: try: yielded = pipeline_iter.send(next_value) except StopIteration: break except Exception, e: if self.handle_run_exception(pipeline_key, pipeline_func, e): raise else: return if isinstance(yielded, Pipeline): if yielded in sub_stage_dict: raise UnexpectedPipelineError( 'Already yielded pipeline object %r with pipeline ID %s' % (yielded, yielded.pipeline_id)) last_sub_stage = yielded next_value = PipelineFuture(yielded.output_names) next_value._after_all_pipelines.update(After._local._after_all_futures) next_value._after_all_pipelines.update(InOrder._local._in_order_futures) sub_stage_dict[yielded] = next_value sub_stage_ordering.append(yielded) InOrder._add_future(next_value) # To aid local testing, the task_retry flag (which instructs the # evaluator to raise all exceptions back up to the task queue) is # inherited by all children from the root down. yielded.task_retry = pipeline_func.task_retry else: raise UnexpectedPipelineError( 'Yielded a disallowed value: %r' % yielded) if last_sub_stage: # Final yielded stage inherits outputs from calling pipeline that were not # already filled during the generator's execution. inherited_outputs = params['output_slots'] for slot_name in self.session_filled_output_names: del inherited_outputs[slot_name] sub_stage_dict[last_sub_stage]._inherit_outputs( pipeline_record.class_path, inherited_outputs) else: # Here the generator has yielded nothing, and thus acts as a synchronous # function. We can skip the rest of the generator steps completely and # fill the default output slot to cause finalizing. expected_outputs = set(caller_output._output_dict.iterkeys()) expected_outputs.remove('default') found_outputs = self.session_filled_output_names if expected_outputs != found_outputs: exception = SlotNotFilledError( 'Outputs %r for pipeline ID "%s" were never filled by "%s".' % ( expected_outputs - found_outputs, pipeline_key.name(), pipeline_func._class_path)) if self.handle_run_exception(pipeline_key, pipeline_func, exception): raise exception else: self.fill_slot(pipeline_key, caller_output.default, None) self.transition_run(pipeline_key) return # Allocate any SlotRecords that do not yet exist. entities_to_put = [] for future in sub_stage_dict.itervalues(): for slot in future._output_dict.itervalues(): if not slot._exists: entities_to_put.append(_SlotRecord( key=slot.key, root_pipeline=root_pipeline_key)) # Allocate PipelineRecords and BarrierRecords for generator-run Pipelines. pipelines_to_run = set() all_children_keys = [] all_output_slots = set() for sub_stage in sub_stage_ordering: future = sub_stage_dict[sub_stage] # Catch any exceptions that are thrown when the pipeline's parameters # are being serialized. This ensures that serialization errors will # cause normal retry/abort behavior. 
try: dependent_slots, output_slots, params_text, params_blob = \ _generate_args(sub_stage, future, self.queue_name, self.base_path) except Exception, e: retry_message = 'Bad child arguments. %s: %s' % ( e.__class__.__name__, str(e)) logging.exception( 'Generator %r#%s caused exception while serializing args for ' 'child pipeline %r. %s', pipeline_func, pipeline_key.name(), sub_stage, retry_message) self.transition_retry(pipeline_key, retry_message) if pipeline_func.task_retry: raise else: return child_pipeline_key = db.Key.from_path( _PipelineRecord.kind(), uuid.uuid4().hex) all_output_slots.update(output_slots) all_children_keys.append(child_pipeline_key) child_pipeline = _PipelineRecord( key=child_pipeline_key, root_pipeline=root_pipeline_key, # Bug in DB means we need to use the storage name here, # not the local property name. params=params_text, params_blob=params_blob, class_path=sub_stage._class_path, max_attempts=sub_stage.max_attempts) entities_to_put.append(child_pipeline) if not dependent_slots: # This child pipeline will run immediately. pipelines_to_run.add(child_pipeline_key) child_pipeline.start_time = self._gettime() else: entities_to_put.extend(_PipelineContext._create_barrier_entities( root_pipeline_key, child_pipeline_key, _BarrierRecord.START, dependent_slots)) entities_to_put.extend(_PipelineContext._create_barrier_entities( root_pipeline_key, child_pipeline_key, _BarrierRecord.FINALIZE, output_slots)) # This generator pipeline's finalization barrier must include all of the # outputs of any child pipelines that it runs. This ensures the finalized # calls will not happen until all child pipelines have completed. # # The transition_run() call below will update the FINALIZE _BarrierRecord # for this generator pipeline to include all of these child outputs in # its list of blocking_slots. That update is done transactionally to # make sure the _BarrierRecord only lists the slots that matter. # # However, the notify_barriers() method doesn't find _BarrierRecords # through the blocking_slots field. It finds them through _BarrierIndexes # entities. Thus, before we update the FINALIZE _BarrierRecord in # transition_run(), we need to write _BarrierIndexes for all child outputs. barrier_entities = _PipelineContext._create_barrier_entities( root_pipeline_key, pipeline_key, _BarrierRecord.FINALIZE, all_output_slots) # Ignore the first element which is the _BarrierRecord. That entity must # have already been created and put in the datastore for the parent # pipeline before this code generated child pipelines. barrier_indexes = barrier_entities[1:] entities_to_put.extend(barrier_indexes) db.put(entities_to_put) self.transition_run(pipeline_key, blocking_slot_keys=all_output_slots, fanned_out_pipelines=all_children_keys, pipelines_to_run=pipelines_to_run)
python
def evaluate(self, pipeline_key, purpose=None, attempt=0): """Evaluates the given Pipeline and enqueues sub-stages for execution. Args: pipeline_key: The db.Key or stringified key of the _PipelineRecord to run. purpose: Why evaluate was called ('start', 'finalize', or 'abort'). attempt: The attempt number that should be tried. """ After._thread_init() InOrder._thread_init() InOrder._local._activated = False if not isinstance(pipeline_key, db.Key): pipeline_key = db.Key(pipeline_key) pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.error('Pipeline ID "%s" does not exist.', pipeline_key.name()) return if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): # If we're attempting to abort an already aborted pipeline, # we silently advance. #50 if (pipeline_record.status == _PipelineRecord.ABORTED and purpose == _BarrierRecord.ABORT): return logging.error('Pipeline ID "%s" in bad state for purpose "%s": "%s"', pipeline_key.name(), purpose or _BarrierRecord.START, pipeline_record.status) return params = pipeline_record.params root_pipeline_key = \ _PipelineRecord.root_pipeline.get_value_for_datastore(pipeline_record) default_slot_key = db.Key(params['output_slots']['default']) default_slot_record, root_pipeline_record = db.get([ default_slot_key, root_pipeline_key]) if default_slot_record is None: logging.error('Pipeline ID "%s" default slot "%s" does not exist.', pipeline_key.name(), default_slot_key) return if root_pipeline_record is None: logging.error('Pipeline ID "%s" root pipeline ID "%s" is missing.', pipeline_key.name(), root_pipeline_key.name()) return # Always finalize if we're aborting so pipelines have a chance to cleanup # before they terminate. Pipelines must access 'was_aborted' to find # out how their finalization should work. abort_signal = ( purpose == _BarrierRecord.ABORT or root_pipeline_record.abort_requested == True) finalize_signal = ( (default_slot_record.status == _SlotRecord.FILLED and purpose == _BarrierRecord.FINALIZE) or abort_signal) try: pipeline_func_class = mr_util.for_name(pipeline_record.class_path) except ImportError, e: # This means something is wrong with the deployed code. Rely on the # taskqueue system to do retries. retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception( 'Could not locate %s#%s. %s', pipeline_record.class_path, pipeline_key.name(), retry_message) raise try: pipeline_func = pipeline_func_class.from_id( pipeline_key.name(), resolve_outputs=finalize_signal, _pipeline_record=pipeline_record) except SlotNotFilledError, e: logging.exception( 'Could not resolve arguments for %s#%s. Most likely this means there ' 'is a bug in the Pipeline runtime or some intermediate data has been ' 'deleted from the Datastore. Giving up.', pipeline_record.class_path, pipeline_key.name()) self.transition_aborted(pipeline_key) return except Exception, e: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception( 'Instantiating %s#%s raised exception. 
%s', pipeline_record.class_path, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) if pipeline_record.params['task_retry']: raise else: return else: pipeline_generator = mr_util.is_generator_function( pipeline_func_class.run) caller_output = pipeline_func.outputs if (abort_signal and pipeline_func.async and pipeline_record.status == _PipelineRecord.RUN and not pipeline_func.try_cancel()): logging.warning( 'Could not cancel and abort mid-flight async pipeline: %r#%s', pipeline_func, pipeline_key.name()) return if finalize_signal: try: pipeline_func._finalized_internal( self, pipeline_key, root_pipeline_key, caller_output, abort_signal) except Exception, e: # This means something is wrong with the deployed finalization code. # Rely on the taskqueue system to do retries. retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Finalizing %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) raise else: if not abort_signal: self.transition_complete(pipeline_key) return if abort_signal: logging.debug('Marking as aborted %s#%s', pipeline_func, pipeline_key.name()) self.transition_aborted(pipeline_key) return if pipeline_record.current_attempt != attempt: logging.error( 'Received evaluation task for pipeline ID "%s" attempt %d but ' 'current pending attempt is %d', pipeline_key.name(), attempt, pipeline_record.current_attempt) return if pipeline_record.current_attempt >= pipeline_record.max_attempts: logging.error( 'Received evaluation task for pipeline ID "%s" on attempt %d ' 'but that exceeds max attempts %d', pipeline_key.name(), attempt, pipeline_record.max_attempts) return if pipeline_record.next_retry_time is not None: retry_time = pipeline_record.next_retry_time - _RETRY_WIGGLE_TIMEDELTA if self._gettime() <= retry_time: detail_message = ( 'Received evaluation task for pipeline ID "%s" on attempt %d, ' 'which will not be ready until: %s' % (pipeline_key.name(), pipeline_record.current_attempt, pipeline_record.next_retry_time)) logging.warning(detail_message) raise UnexpectedPipelineError(detail_message) if pipeline_record.status == _PipelineRecord.RUN and pipeline_generator: if (default_slot_record.status == _SlotRecord.WAITING and not pipeline_record.fanned_out): # This properly handles the yield-less generator case when the # RUN state transition worked properly but outputting to the default # slot failed. self.fill_slot(pipeline_key, caller_output.default, None) return if (pipeline_record.status == _PipelineRecord.WAITING and pipeline_func.async): self.transition_run(pipeline_key) try: result = pipeline_func._run_internal( self, pipeline_key, root_pipeline_key, caller_output) except Exception, e: if self.handle_run_exception(pipeline_key, pipeline_func, e): raise else: return if pipeline_func.async: return if not pipeline_generator: # Catch any exceptions that are thrown when the pipeline's return # value is being serialized. This ensures that serialization errors # will cause normal abort/retry behavior. try: self.fill_slot(pipeline_key, caller_output.default, result) except Exception, e: retry_message = 'Bad return value. %s: %s' % ( e.__class__.__name__, str(e)) logging.exception( 'Generator %r#%s caused exception while serializing return ' 'value %r. 
%s', pipeline_func, pipeline_key.name(), result, retry_message) self.transition_retry(pipeline_key, retry_message) if pipeline_func.task_retry: raise else: return expected_outputs = set(caller_output._output_dict.iterkeys()) found_outputs = self.session_filled_output_names if expected_outputs != found_outputs: exception = SlotNotFilledError( 'Outputs %r for pipeline ID "%s" were never filled by "%s".' % ( expected_outputs - found_outputs, pipeline_key.name(), pipeline_func._class_path)) if self.handle_run_exception(pipeline_key, pipeline_func, exception): raise exception return pipeline_iter = result next_value = None last_sub_stage = None sub_stage = None sub_stage_dict = {} sub_stage_ordering = [] while True: try: yielded = pipeline_iter.send(next_value) except StopIteration: break except Exception, e: if self.handle_run_exception(pipeline_key, pipeline_func, e): raise else: return if isinstance(yielded, Pipeline): if yielded in sub_stage_dict: raise UnexpectedPipelineError( 'Already yielded pipeline object %r with pipeline ID %s' % (yielded, yielded.pipeline_id)) last_sub_stage = yielded next_value = PipelineFuture(yielded.output_names) next_value._after_all_pipelines.update(After._local._after_all_futures) next_value._after_all_pipelines.update(InOrder._local._in_order_futures) sub_stage_dict[yielded] = next_value sub_stage_ordering.append(yielded) InOrder._add_future(next_value) # To aid local testing, the task_retry flag (which instructs the # evaluator to raise all exceptions back up to the task queue) is # inherited by all children from the root down. yielded.task_retry = pipeline_func.task_retry else: raise UnexpectedPipelineError( 'Yielded a disallowed value: %r' % yielded) if last_sub_stage: # Final yielded stage inherits outputs from calling pipeline that were not # already filled during the generator's execution. inherited_outputs = params['output_slots'] for slot_name in self.session_filled_output_names: del inherited_outputs[slot_name] sub_stage_dict[last_sub_stage]._inherit_outputs( pipeline_record.class_path, inherited_outputs) else: # Here the generator has yielded nothing, and thus acts as a synchronous # function. We can skip the rest of the generator steps completely and # fill the default output slot to cause finalizing. expected_outputs = set(caller_output._output_dict.iterkeys()) expected_outputs.remove('default') found_outputs = self.session_filled_output_names if expected_outputs != found_outputs: exception = SlotNotFilledError( 'Outputs %r for pipeline ID "%s" were never filled by "%s".' % ( expected_outputs - found_outputs, pipeline_key.name(), pipeline_func._class_path)) if self.handle_run_exception(pipeline_key, pipeline_func, exception): raise exception else: self.fill_slot(pipeline_key, caller_output.default, None) self.transition_run(pipeline_key) return # Allocate any SlotRecords that do not yet exist. entities_to_put = [] for future in sub_stage_dict.itervalues(): for slot in future._output_dict.itervalues(): if not slot._exists: entities_to_put.append(_SlotRecord( key=slot.key, root_pipeline=root_pipeline_key)) # Allocate PipelineRecords and BarrierRecords for generator-run Pipelines. pipelines_to_run = set() all_children_keys = [] all_output_slots = set() for sub_stage in sub_stage_ordering: future = sub_stage_dict[sub_stage] # Catch any exceptions that are thrown when the pipeline's parameters # are being serialized. This ensures that serialization errors will # cause normal retry/abort behavior. 
try: dependent_slots, output_slots, params_text, params_blob = \ _generate_args(sub_stage, future, self.queue_name, self.base_path) except Exception, e: retry_message = 'Bad child arguments. %s: %s' % ( e.__class__.__name__, str(e)) logging.exception( 'Generator %r#%s caused exception while serializing args for ' 'child pipeline %r. %s', pipeline_func, pipeline_key.name(), sub_stage, retry_message) self.transition_retry(pipeline_key, retry_message) if pipeline_func.task_retry: raise else: return child_pipeline_key = db.Key.from_path( _PipelineRecord.kind(), uuid.uuid4().hex) all_output_slots.update(output_slots) all_children_keys.append(child_pipeline_key) child_pipeline = _PipelineRecord( key=child_pipeline_key, root_pipeline=root_pipeline_key, # Bug in DB means we need to use the storage name here, # not the local property name. params=params_text, params_blob=params_blob, class_path=sub_stage._class_path, max_attempts=sub_stage.max_attempts) entities_to_put.append(child_pipeline) if not dependent_slots: # This child pipeline will run immediately. pipelines_to_run.add(child_pipeline_key) child_pipeline.start_time = self._gettime() else: entities_to_put.extend(_PipelineContext._create_barrier_entities( root_pipeline_key, child_pipeline_key, _BarrierRecord.START, dependent_slots)) entities_to_put.extend(_PipelineContext._create_barrier_entities( root_pipeline_key, child_pipeline_key, _BarrierRecord.FINALIZE, output_slots)) # This generator pipeline's finalization barrier must include all of the # outputs of any child pipelines that it runs. This ensures the finalized # calls will not happen until all child pipelines have completed. # # The transition_run() call below will update the FINALIZE _BarrierRecord # for this generator pipeline to include all of these child outputs in # its list of blocking_slots. That update is done transactionally to # make sure the _BarrierRecord only lists the slots that matter. # # However, the notify_barriers() method doesn't find _BarrierRecords # through the blocking_slots field. It finds them through _BarrierIndexes # entities. Thus, before we update the FINALIZE _BarrierRecord in # transition_run(), we need to write _BarrierIndexes for all child outputs. barrier_entities = _PipelineContext._create_barrier_entities( root_pipeline_key, pipeline_key, _BarrierRecord.FINALIZE, all_output_slots) # Ignore the first element which is the _BarrierRecord. That entity must # have already been created and put in the datastore for the parent # pipeline before this code generated child pipelines. barrier_indexes = barrier_entities[1:] entities_to_put.extend(barrier_indexes) db.put(entities_to_put) self.transition_run(pipeline_key, blocking_slot_keys=all_output_slots, fanned_out_pipelines=all_children_keys, pipelines_to_run=pipelines_to_run)
Evaluates the given Pipeline and enqueues sub-stages for execution.

Args:
  pipeline_key: The db.Key or stringified key of the _PipelineRecord to run.
  purpose: Why evaluate was called ('start', 'finalize', or 'abort').
  attempt: The attempt number that should be tried.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L1989-L2359
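To make the generator handling concrete, a hedged example of the kind of generator pipeline evaluate() drives; Square, Add, and SumOfSquares are invented illustration classes, not part of the library:

import pipeline


class Square(pipeline.Pipeline):
  def run(self, n):
    return n * n


class Add(pipeline.Pipeline):
  def run(self, x, y):
    return x + y


class SumOfSquares(pipeline.Pipeline):
  # evaluate() notices that run() is a generator, sends each yielded child's
  # PipelineFuture back in as the value of the yield expression, and lets the
  # last yielded child inherit this generator's still-unfilled output slots.
  def run(self, a, b):
    first = yield Square(a)
    second = yield Square(b)
    yield Add(first.default, second.default)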
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext._create_barrier_entities
def _create_barrier_entities(root_pipeline_key,
                             child_pipeline_key,
                             purpose,
                             blocking_slot_keys):
  """Creates all of the entities required for a _BarrierRecord.

  Args:
    root_pipeline_key: The root pipeline this is part of.
    child_pipeline_key: The pipeline this barrier is for.
    purpose: _BarrierRecord.START or _BarrierRecord.FINALIZE.
    blocking_slot_keys: Set of db.Keys corresponding to _SlotRecords that
      this barrier should wait on before firing.

  Returns:
    List of entities, starting with the _BarrierRecord entity, followed by
    _BarrierIndexes used for firing when _SlotRecords are filled in the same
    order as the blocking_slot_keys list provided. All of these entities
    should be put in the Datastore to ensure the barrier fires properly.
  """
  result = []

  blocking_slot_keys = list(blocking_slot_keys)

  barrier = _BarrierRecord(
      parent=child_pipeline_key,
      key_name=purpose,
      target=child_pipeline_key,
      root_pipeline=root_pipeline_key,
      blocking_slots=blocking_slot_keys)
  result.append(barrier)

  for slot_key in blocking_slot_keys:
    barrier_index_path = []
    barrier_index_path.extend(slot_key.to_path())
    barrier_index_path.extend(child_pipeline_key.to_path())
    barrier_index_path.extend([_BarrierIndex.kind(), purpose])
    barrier_index_key = db.Key.from_path(*barrier_index_path)

    barrier_index = _BarrierIndex(
        key=barrier_index_key,
        root_pipeline=root_pipeline_key)
    result.append(barrier_index)

  return result
python
def _create_barrier_entities(root_pipeline_key,
                             child_pipeline_key,
                             purpose,
                             blocking_slot_keys):
  """Creates all of the entities required for a _BarrierRecord.

  Args:
    root_pipeline_key: The root pipeline this is part of.
    child_pipeline_key: The pipeline this barrier is for.
    purpose: _BarrierRecord.START or _BarrierRecord.FINALIZE.
    blocking_slot_keys: Set of db.Keys corresponding to _SlotRecords that
      this barrier should wait on before firing.

  Returns:
    List of entities, starting with the _BarrierRecord entity, followed by
    _BarrierIndexes used for firing when _SlotRecords are filled in the same
    order as the blocking_slot_keys list provided. All of these entities
    should be put in the Datastore to ensure the barrier fires properly.
  """
  result = []

  blocking_slot_keys = list(blocking_slot_keys)

  barrier = _BarrierRecord(
      parent=child_pipeline_key,
      key_name=purpose,
      target=child_pipeline_key,
      root_pipeline=root_pipeline_key,
      blocking_slots=blocking_slot_keys)
  result.append(barrier)

  for slot_key in blocking_slot_keys:
    barrier_index_path = []
    barrier_index_path.extend(slot_key.to_path())
    barrier_index_path.extend(child_pipeline_key.to_path())
    barrier_index_path.extend([_BarrierIndex.kind(), purpose])
    barrier_index_key = db.Key.from_path(*barrier_index_path)

    barrier_index = _BarrierIndex(
        key=barrier_index_key,
        root_pipeline=root_pipeline_key)
    result.append(barrier_index)

  return result
Creates all of the entities required for a _BarrierRecord.

Args:
  root_pipeline_key: The root pipeline this is part of.
  child_pipeline_key: The pipeline this barrier is for.
  purpose: _BarrierRecord.START or _BarrierRecord.FINALIZE.
  blocking_slot_keys: Set of db.Keys corresponding to _SlotRecords that
    this barrier should wait on before firing.

Returns:
  List of entities, starting with the _BarrierRecord entity, followed by
  _BarrierIndexes used for firing when _SlotRecords are filled in the same
  order as the blocking_slot_keys list provided. All of these entities
  should be put in the Datastore to ensure the barrier fires properly.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2362-L2405
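To make the key layout concrete without the App Engine SDK, here is a small stand-alone sketch using plain tuples in place of db.Key paths; the kind strings and the 'finalize' purpose value are placeholders, not the real datastore kind names:

def barrier_index_path(slot_path, child_pipeline_path, purpose):
    # The slot's path comes first, so a query rooted at a filled slot's key
    # can find every index (and thus every barrier) waiting on that slot.
    return tuple(slot_path) + tuple(child_pipeline_path) + ('_BarrierIndex', purpose)

slot = ('_SlotRecord', 'slot-123')
child = ('_PipelineRecord', 'pipeline-456')
print(barrier_index_path(slot, child, 'finalize'))
# ('_SlotRecord', 'slot-123', '_PipelineRecord', 'pipeline-456', '_BarrierIndex', 'finalize')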
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.handle_run_exception
def handle_run_exception(self, pipeline_key, pipeline_func, e): """Handles an exception raised by a Pipeline's user code. Args: pipeline_key: The pipeline that raised the error. pipeline_func: The class path name of the Pipeline that was running. e: The exception that was raised. Returns: True if the exception should be re-raised up through the calling stack by the caller of this method. """ if isinstance(e, Retry): retry_message = str(e) logging.warning('User forced retry for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, retry_message) self.transition_retry(pipeline_key, retry_message) elif isinstance(e, Abort): abort_message = str(e) logging.warning('User forced abort for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, abort_message) pipeline_func.abort(abort_message) else: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Generator %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) return pipeline_func.task_retry
python
def handle_run_exception(self, pipeline_key, pipeline_func, e): """Handles an exception raised by a Pipeline's user code. Args: pipeline_key: The pipeline that raised the error. pipeline_func: The class path name of the Pipeline that was running. e: The exception that was raised. Returns: True if the exception should be re-raised up through the calling stack by the caller of this method. """ if isinstance(e, Retry): retry_message = str(e) logging.warning('User forced retry for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, retry_message) self.transition_retry(pipeline_key, retry_message) elif isinstance(e, Abort): abort_message = str(e) logging.warning('User forced abort for pipeline ID "%s" of %r: %s', pipeline_key.name(), pipeline_func, abort_message) pipeline_func.abort(abort_message) else: retry_message = '%s: %s' % (e.__class__.__name__, str(e)) logging.exception('Generator %r#%s raised exception. %s', pipeline_func, pipeline_key.name(), retry_message) self.transition_retry(pipeline_key, retry_message) return pipeline_func.task_retry
Handles an exception raised by a Pipeline's user code.

Args:
  pipeline_key: The pipeline that raised the error.
  pipeline_func: The class path name of the Pipeline that was running.
  e: The exception that was raised.

Returns:
  True if the exception should be re-raised up through the calling stack
  by the caller of this method.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2407-L2435
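From the user's side, the Retry and Abort branches above are typically reached by raising the library's exceptions inside a pipeline's run() method. A hedged sketch: FetchPage and the fetch() helper are invented for illustration; only pipeline.Retry and pipeline.Abort are taken from the module shown above.

import pipeline

class FetchPage(pipeline.Pipeline):
  def run(self, url):
    response = fetch(url)  # hypothetical HTTP helper, not part of the library
    if response.status_code == 503:
      # Lands in the isinstance(e, Retry) branch: the task is re-queued with
      # backoff by transition_retry().
      raise pipeline.Retry('Server busy, will retry: %s' % url)
    if response.status_code == 404:
      # Lands in the isinstance(e, Abort) branch: aborts the pipeline tree.
      raise pipeline.Abort('Page permanently gone: %s' % url)
    return response.content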
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.transition_run
def transition_run(self, pipeline_key, blocking_slot_keys=None, fanned_out_pipelines=None, pipelines_to_run=None): """Marks an asynchronous or generator pipeline as running. Does nothing if the pipeline is no longer in a runnable state. Args: pipeline_key: The db.Key of the _PipelineRecord to update. blocking_slot_keys: List of db.Key instances that this pipeline's finalization barrier should wait on in addition to the existing one. This is used to update the barrier to include all child outputs. When None, the barrier will not be updated. fanned_out_pipelines: List of db.Key instances of _PipelineRecords that were fanned out by this generator pipeline. This is distinct from the 'pipelines_to_run' list because not all of the pipelines listed here will be immediately ready to execute. When None, then this generator yielded no children. pipelines_to_run: List of db.Key instances of _PipelineRecords that should be kicked off (fan-out) transactionally as part of this transition. When None, no child pipelines will run. All db.Keys in this list must also be present in the fanned_out_pipelines list. Raises: UnexpectedPipelineError if blocking_slot_keys was not empty and the _BarrierRecord has gone missing. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning('Pipeline ID "%s" cannot be marked as run. ' 'Does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status != _PipelineRecord.WAITING: logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.RUN if fanned_out_pipelines: # NOTE: We must model the pipeline relationship in a top-down manner, # meaning each pipeline must point forward to the pipelines that it # fanned out to. The reason is race conditions. If evaluate() # dies early, it may create many unused _PipelineRecord and _SlotRecord # instances that never progress. The only way we know which of these # are valid is by traversing the graph from the root, where the # fanned_out property refers to those pipelines that were run using a # transactional task. child_pipeline_list = list(fanned_out_pipelines) pipeline_record.fanned_out = child_pipeline_list if pipelines_to_run: child_indexes = [ child_pipeline_list.index(p) for p in pipelines_to_run] child_indexes.sort() task = taskqueue.Task( url=self.fanout_handler_path, params=dict(parent_key=str(pipeline_key), child_indexes=child_indexes)) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() if blocking_slot_keys: # NOTE: Always update a generator pipeline's finalization barrier to # include all of the outputs of any pipelines that it runs, to ensure # that finalized calls will not happen until all child pipelines have # completed. This must happen transactionally with the enqueue of # the fan-out kickoff task above to ensure the child output slots and # the barrier blocking slots are the same. barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key) finalize_barrier = db.get(barrier_key) if finalize_barrier is None: raise UnexpectedPipelineError( 'Pipeline ID "%s" cannot update finalize barrier. ' 'Does not exist.' % pipeline_key.name()) else: finalize_barrier.blocking_slots = list( blocking_slot_keys.union(set(finalize_barrier.blocking_slots))) finalize_barrier.put() db.run_in_transaction(txn)
python
def transition_run(self, pipeline_key, blocking_slot_keys=None, fanned_out_pipelines=None, pipelines_to_run=None): """Marks an asynchronous or generator pipeline as running. Does nothing if the pipeline is no longer in a runnable state. Args: pipeline_key: The db.Key of the _PipelineRecord to update. blocking_slot_keys: List of db.Key instances that this pipeline's finalization barrier should wait on in addition to the existing one. This is used to update the barrier to include all child outputs. When None, the barrier will not be updated. fanned_out_pipelines: List of db.Key instances of _PipelineRecords that were fanned out by this generator pipeline. This is distinct from the 'pipelines_to_run' list because not all of the pipelines listed here will be immediately ready to execute. When None, then this generator yielded no children. pipelines_to_run: List of db.Key instances of _PipelineRecords that should be kicked off (fan-out) transactionally as part of this transition. When None, no child pipelines will run. All db.Keys in this list must also be present in the fanned_out_pipelines list. Raises: UnexpectedPipelineError if blocking_slot_keys was not empty and the _BarrierRecord has gone missing. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning('Pipeline ID "%s" cannot be marked as run. ' 'Does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status != _PipelineRecord.WAITING: logging.warning('Pipeline ID "%s" in bad state to be marked as run: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.RUN if fanned_out_pipelines: # NOTE: We must model the pipeline relationship in a top-down manner, # meaning each pipeline must point forward to the pipelines that it # fanned out to. The reason is race conditions. If evaluate() # dies early, it may create many unused _PipelineRecord and _SlotRecord # instances that never progress. The only way we know which of these # are valid is by traversing the graph from the root, where the # fanned_out property refers to those pipelines that were run using a # transactional task. child_pipeline_list = list(fanned_out_pipelines) pipeline_record.fanned_out = child_pipeline_list if pipelines_to_run: child_indexes = [ child_pipeline_list.index(p) for p in pipelines_to_run] child_indexes.sort() task = taskqueue.Task( url=self.fanout_handler_path, params=dict(parent_key=str(pipeline_key), child_indexes=child_indexes)) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() if blocking_slot_keys: # NOTE: Always update a generator pipeline's finalization barrier to # include all of the outputs of any pipelines that it runs, to ensure # that finalized calls will not happen until all child pipelines have # completed. This must happen transactionally with the enqueue of # the fan-out kickoff task above to ensure the child output slots and # the barrier blocking slots are the same. barrier_key = db.Key.from_path( _BarrierRecord.kind(), _BarrierRecord.FINALIZE, parent=pipeline_key) finalize_barrier = db.get(barrier_key) if finalize_barrier is None: raise UnexpectedPipelineError( 'Pipeline ID "%s" cannot update finalize barrier. ' 'Does not exist.' % pipeline_key.name()) else: finalize_barrier.blocking_slots = list( blocking_slot_keys.union(set(finalize_barrier.blocking_slots))) finalize_barrier.put() db.run_in_transaction(txn)
Marks an asynchronous or generator pipeline as running.

Does nothing if the pipeline is no longer in a runnable state.

Args:
  pipeline_key: The db.Key of the _PipelineRecord to update.
  blocking_slot_keys: List of db.Key instances that this pipeline's
    finalization barrier should wait on in addition to the existing one.
    This is used to update the barrier to include all child outputs. When
    None, the barrier will not be updated.
  fanned_out_pipelines: List of db.Key instances of _PipelineRecords that
    were fanned out by this generator pipeline. This is distinct from the
    'pipelines_to_run' list because not all of the pipelines listed here
    will be immediately ready to execute. When None, then this generator
    yielded no children.
  pipelines_to_run: List of db.Key instances of _PipelineRecords that
    should be kicked off (fan-out) transactionally as part of this
    transition. When None, no child pipelines will run. All db.Keys in
    this list must also be present in the fanned_out_pipelines list.

Raises:
  UnexpectedPipelineError if blocking_slot_keys was not empty and the
  _BarrierRecord has gone missing.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2437-L2523
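The fan-out task enqueued above carries only the positions of the runnable children within the parent's fanned_out list, not their keys. A small SDK-free sketch of that bookkeeping, with strings standing in for db.Key values:

fanned_out = ['child-a', 'child-b', 'child-c', 'child-d']  # order stored on the parent
to_run = {'child-c', 'child-a'}                            # children with no dependent slots

child_indexes = sorted(fanned_out.index(key) for key in to_run)
print(child_indexes)  # [0, 2]; the fan-out handler re-reads the parent record
                      # and maps these indexes back to child pipeline keys.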
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.transition_complete
def transition_complete(self, pipeline_key): """Marks the given pipeline as complete. Does nothing if the pipeline is no longer in a state that can be completed. Args: pipeline_key: db.Key of the _PipelineRecord that has completed. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to mark pipeline ID "%s" as complete but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to mark pipeline ID "%s" as complete, found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.DONE pipeline_record.finalized_time = self._gettime() pipeline_record.put() db.run_in_transaction(txn)
python
def transition_complete(self, pipeline_key): """Marks the given pipeline as complete. Does nothing if the pipeline is no longer in a state that can be completed. Args: pipeline_key: db.Key of the _PipelineRecord that has completed. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to mark pipeline ID "%s" as complete but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to mark pipeline ID "%s" as complete, found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() pipeline_record.status = _PipelineRecord.DONE pipeline_record.finalized_time = self._gettime() pipeline_record.put() db.run_in_transaction(txn)
Marks the given pipeline as complete.

Does nothing if the pipeline is no longer in a state that can be completed.

Args:
  pipeline_key: db.Key of the _PipelineRecord that has completed.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2525-L2551
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_PipelineContext.transition_retry
def transition_retry(self, pipeline_key, retry_message): """Marks the given pipeline as requiring another retry. Does nothing if all attempts have been exceeded. Args: pipeline_key: db.Key of the _PipelineRecord that needs to be retried. retry_message: User-supplied message indicating the reason for the retry. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to retry pipeline ID "%s" but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to retry pipeline ID "%s", found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() params = pipeline_record.params offset_seconds = ( params['backoff_seconds'] * (params['backoff_factor'] ** pipeline_record.current_attempt)) pipeline_record.next_retry_time = ( self._gettime() + datetime.timedelta(seconds=offset_seconds)) pipeline_record.current_attempt += 1 pipeline_record.retry_message = retry_message pipeline_record.status = _PipelineRecord.WAITING if pipeline_record.current_attempt >= pipeline_record.max_attempts: root_pipeline_key = ( _PipelineRecord.root_pipeline.get_value_for_datastore( pipeline_record)) logging.warning( 'Giving up on pipeline ID "%s" after %d attempt(s); causing abort ' 'all the way to the root pipeline ID "%s"', pipeline_key.name(), pipeline_record.current_attempt, root_pipeline_key.name()) # NOTE: We do *not* set the status to aborted here to ensure that # this pipeline will be finalized before it has been marked as aborted. pipeline_record.abort_message = ( 'Aborting after %d attempts' % pipeline_record.current_attempt) task = taskqueue.Task( url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key)) task.add(queue_name=self.queue_name, transactional=True) else: task = taskqueue.Task( url=self.pipeline_handler_path, eta=pipeline_record.next_retry_time, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.START, attempt=pipeline_record.current_attempt), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target']) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() db.run_in_transaction(txn)
python
def transition_retry(self, pipeline_key, retry_message): """Marks the given pipeline as requiring another retry. Does nothing if all attempts have been exceeded. Args: pipeline_key: db.Key of the _PipelineRecord that needs to be retried. retry_message: User-supplied message indicating the reason for the retry. """ def txn(): pipeline_record = db.get(pipeline_key) if pipeline_record is None: logging.warning( 'Tried to retry pipeline ID "%s" but it does not exist.', pipeline_key.name()) raise db.Rollback() if pipeline_record.status not in ( _PipelineRecord.WAITING, _PipelineRecord.RUN): logging.warning( 'Tried to retry pipeline ID "%s", found bad state: %s', pipeline_key.name(), pipeline_record.status) raise db.Rollback() params = pipeline_record.params offset_seconds = ( params['backoff_seconds'] * (params['backoff_factor'] ** pipeline_record.current_attempt)) pipeline_record.next_retry_time = ( self._gettime() + datetime.timedelta(seconds=offset_seconds)) pipeline_record.current_attempt += 1 pipeline_record.retry_message = retry_message pipeline_record.status = _PipelineRecord.WAITING if pipeline_record.current_attempt >= pipeline_record.max_attempts: root_pipeline_key = ( _PipelineRecord.root_pipeline.get_value_for_datastore( pipeline_record)) logging.warning( 'Giving up on pipeline ID "%s" after %d attempt(s); causing abort ' 'all the way to the root pipeline ID "%s"', pipeline_key.name(), pipeline_record.current_attempt, root_pipeline_key.name()) # NOTE: We do *not* set the status to aborted here to ensure that # this pipeline will be finalized before it has been marked as aborted. pipeline_record.abort_message = ( 'Aborting after %d attempts' % pipeline_record.current_attempt) task = taskqueue.Task( url=self.fanout_abort_handler_path, params=dict(root_pipeline_key=root_pipeline_key)) task.add(queue_name=self.queue_name, transactional=True) else: task = taskqueue.Task( url=self.pipeline_handler_path, eta=pipeline_record.next_retry_time, params=dict(pipeline_key=pipeline_key, purpose=_BarrierRecord.START, attempt=pipeline_record.current_attempt), headers={'X-Ae-Pipeline-Key': pipeline_key}, target=pipeline_record.params['target']) task.add(queue_name=self.queue_name, transactional=True) pipeline_record.put() db.run_in_transaction(txn)
Marks the given pipeline as requiring another retry.

Does nothing if all attempts have been exceeded.

Args:
  pipeline_key: db.Key of the _PipelineRecord that needs to be retried.
  retry_message: User-supplied message indicating the reason for the retry.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2553-L2615
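The retry delay above grows geometrically: offset_seconds = backoff_seconds * backoff_factor ** current_attempt, evaluated before the attempt counter is incremented. A stand-alone sketch with illustrative parameter values:

def retry_delays(backoff_seconds, backoff_factor, max_attempts):
    # One delay per retry; after the final attempt the pipeline aborts
    # instead of scheduling another task.
    return [backoff_seconds * backoff_factor ** attempt
            for attempt in range(max_attempts - 1)]

print(retry_delays(15, 2, 5))  # [15, 30, 60, 120] seconds between attempts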
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/pipeline.py
_CallbackHandler.run_callback
def run_callback(self): """Runs the callback for the pipeline specified in the request. Raises: _CallbackTaskError if something was wrong with the request parameters. """ pipeline_id = self.request.get('pipeline_id') if not pipeline_id: raise _CallbackTaskError('"pipeline_id" parameter missing.') pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id) pipeline_record = db.get(pipeline_key) if pipeline_record is None: raise _CallbackTaskError( 'Pipeline ID "%s" for callback does not exist.' % pipeline_id) params = pipeline_record.params real_class_path = params['class_path'] try: pipeline_func_class = mr_util.for_name(real_class_path) except ImportError, e: raise _CallbackTaskError( 'Cannot load class named "%s" for pipeline ID "%s".' % (real_class_path, pipeline_id)) if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: if pipeline_func_class.public_callbacks: pass elif pipeline_func_class.admin_callbacks: if not users.is_current_user_admin(): raise _CallbackTaskError( 'Unauthorized callback for admin-only pipeline ID "%s"' % pipeline_id) else: raise _CallbackTaskError( 'External callback for internal-only pipeline ID "%s"' % pipeline_id) kwargs = {} for key in self.request.arguments(): if key != 'pipeline_id': kwargs[str(key)] = self.request.get(key) def perform_callback(): stage = pipeline_func_class.from_id(pipeline_id) if stage is None: raise _CallbackTaskError( 'Pipeline ID "%s" deleted during callback' % pipeline_id) return stage._callback_internal(kwargs) # callback_xg_transaction is a 3-valued setting (None=no trans, # False=1-eg-trans, True=xg-trans) if pipeline_func_class._callback_xg_transaction is not None: transaction_options = db.create_transaction_options( xg=pipeline_func_class._callback_xg_transaction) callback_result = db.run_in_transaction_options(transaction_options, perform_callback) else: callback_result = perform_callback() if callback_result is not None: status_code, content_type, content = callback_result self.response.set_status(status_code) self.response.headers['Content-Type'] = content_type self.response.out.write(content)
python
def run_callback(self): """Runs the callback for the pipeline specified in the request. Raises: _CallbackTaskError if something was wrong with the request parameters. """ pipeline_id = self.request.get('pipeline_id') if not pipeline_id: raise _CallbackTaskError('"pipeline_id" parameter missing.') pipeline_key = db.Key.from_path(_PipelineRecord.kind(), pipeline_id) pipeline_record = db.get(pipeline_key) if pipeline_record is None: raise _CallbackTaskError( 'Pipeline ID "%s" for callback does not exist.' % pipeline_id) params = pipeline_record.params real_class_path = params['class_path'] try: pipeline_func_class = mr_util.for_name(real_class_path) except ImportError, e: raise _CallbackTaskError( 'Cannot load class named "%s" for pipeline ID "%s".' % (real_class_path, pipeline_id)) if 'HTTP_X_APPENGINE_TASKNAME' not in self.request.environ: if pipeline_func_class.public_callbacks: pass elif pipeline_func_class.admin_callbacks: if not users.is_current_user_admin(): raise _CallbackTaskError( 'Unauthorized callback for admin-only pipeline ID "%s"' % pipeline_id) else: raise _CallbackTaskError( 'External callback for internal-only pipeline ID "%s"' % pipeline_id) kwargs = {} for key in self.request.arguments(): if key != 'pipeline_id': kwargs[str(key)] = self.request.get(key) def perform_callback(): stage = pipeline_func_class.from_id(pipeline_id) if stage is None: raise _CallbackTaskError( 'Pipeline ID "%s" deleted during callback' % pipeline_id) return stage._callback_internal(kwargs) # callback_xg_transaction is a 3-valued setting (None=no trans, # False=1-eg-trans, True=xg-trans) if pipeline_func_class._callback_xg_transaction is not None: transaction_options = db.create_transaction_options( xg=pipeline_func_class._callback_xg_transaction) callback_result = db.run_in_transaction_options(transaction_options, perform_callback) else: callback_result = perform_callback() if callback_result is not None: status_code, content_type, content = callback_result self.response.set_status(status_code) self.response.headers['Content-Type'] = content_type self.response.out.write(content)
Runs the callback for the pipeline specified in the request.

Raises:
  _CallbackTaskError if something was wrong with the request parameters.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/pipeline.py#L2803-L2867
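Seen from user code, the handler above dispatches to a pipeline's callback hook and writes its optional (status, content_type, body) result as the HTTP response. A loosely sketched example: the callback() method name is assumed to be the public hook behind _callback_internal(), and WaitForApproval is invented.

import pipeline

class WaitForApproval(pipeline.Pipeline):
  public_callbacks = True  # allows the external (non-task-queue) branch above

  def callback(self, **kwargs):
    # Receives every request parameter except pipeline_id, as collected by
    # run_callback(). Returning a tuple sets the HTTP status, content type,
    # and body written back to the caller.
    approved = kwargs.get('approved') == 'yes'
    return 200, 'text/plain', 'recorded approval=%s' % approved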
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/__init__.py
_fix_path
def _fix_path(): """Finds the google_appengine directory and fixes Python imports to use it.""" import os import sys all_paths = os.environ.get('PYTHONPATH').split(os.pathsep) for path_dir in all_paths: dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py') if os.path.exists(dev_appserver_path): logging.debug('Found appengine SDK on path!') google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path)) sys.path.append(google_appengine) # The next import will fix up sys.path even further to bring in # any dependent lib directories that the SDK needs. dev_appserver = __import__('dev_appserver') sys.path.extend(dev_appserver.EXTRA_PATHS) return
python
def _fix_path(): """Finds the google_appengine directory and fixes Python imports to use it.""" import os import sys all_paths = os.environ.get('PYTHONPATH').split(os.pathsep) for path_dir in all_paths: dev_appserver_path = os.path.join(path_dir, 'dev_appserver.py') if os.path.exists(dev_appserver_path): logging.debug('Found appengine SDK on path!') google_appengine = os.path.dirname(os.path.realpath(dev_appserver_path)) sys.path.append(google_appengine) # The next import will fix up sys.path even further to bring in # any dependent lib directories that the SDK needs. dev_appserver = __import__('dev_appserver') sys.path.extend(dev_appserver.EXTRA_PATHS) return
Finds the google_appengine directory and fixes Python imports to use it.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/__init__.py#L22-L37
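The same search can be written as a small portable helper. This sketch also guards against PYTHONPATH being unset (the original would raise AttributeError in that case because os.environ.get returns None); the marker filename is just the one used above.

import os

def find_sdk_dir(marker='dev_appserver.py'):
    for path_dir in os.environ.get('PYTHONPATH', '').split(os.pathsep):
        candidate = os.path.join(path_dir, marker)
        if os.path.exists(candidate):
            return os.path.dirname(os.path.realpath(candidate))
    return None

print(find_sdk_dir())  # the SDK directory, or None when it is not on PYTHONPATH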
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/models.py
_PipelineRecord.params
def params(self): """Returns the dictionary of parameters for this Pipeline.""" if hasattr(self, '_params_decoded'): return self._params_decoded if self.params_blob is not None: value_encoded = self.params_blob.open().read() else: value_encoded = self.params_text value = json.loads(value_encoded, cls=util.JsonDecoder) if isinstance(value, dict): kwargs = value.get('kwargs') if kwargs: adjusted_kwargs = {} for arg_key, arg_value in kwargs.iteritems(): # Python only allows non-unicode strings as keyword arguments. adjusted_kwargs[str(arg_key)] = arg_value value['kwargs'] = adjusted_kwargs self._params_decoded = value return self._params_decoded
python
def params(self): """Returns the dictionary of parameters for this Pipeline.""" if hasattr(self, '_params_decoded'): return self._params_decoded if self.params_blob is not None: value_encoded = self.params_blob.open().read() else: value_encoded = self.params_text value = json.loads(value_encoded, cls=util.JsonDecoder) if isinstance(value, dict): kwargs = value.get('kwargs') if kwargs: adjusted_kwargs = {} for arg_key, arg_value in kwargs.iteritems(): # Python only allows non-unicode strings as keyword arguments. adjusted_kwargs[str(arg_key)] = arg_value value['kwargs'] = adjusted_kwargs self._params_decoded = value return self._params_decoded
Returns the dictionary of parameters for this Pipeline.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/models.py#L96-L117
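The property above is an instance of a decode-once, cache-on-the-instance pattern (also used by _SlotRecord.value below). A stripped-down, SDK-free version of the same idea; the Record class and field names here are stand-ins for the datastore model:

import json

class Record(object):
    def __init__(self, params_text=None, params_blob=None):
        self.params_text = params_text
        self.params_blob = params_blob  # stands in for the blob-store reference

    @property
    def params(self):
        if hasattr(self, '_params_decoded'):
            return self._params_decoded  # already decoded on a previous access
        encoded = self.params_blob if self.params_blob is not None else self.params_text
        self._params_decoded = json.loads(encoded)
        return self._params_decoded

r = Record(params_text='{"kwargs": {"limit": 10}}')
print(r.params)  # {'kwargs': {'limit': 10}}, decoded once and then reused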
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/models.py
_SlotRecord.value
def value(self): """Returns the value of this Slot.""" if hasattr(self, '_value_decoded'): return self._value_decoded if self.value_blob is not None: encoded_value = self.value_blob.open().read() else: encoded_value = self.value_text self._value_decoded = json.loads(encoded_value, cls=util.JsonDecoder) return self._value_decoded
python
def value(self): """Returns the value of this Slot.""" if hasattr(self, '_value_decoded'): return self._value_decoded if self.value_blob is not None: encoded_value = self.value_blob.open().read() else: encoded_value = self.value_text self._value_decoded = json.loads(encoded_value, cls=util.JsonDecoder) return self._value_decoded
Returns the value of this Slot.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/models.py#L156-L167
GoogleCloudPlatform/appengine-pipelines
python/src/pipeline/models.py
_BarrierIndex.to_barrier_key
def to_barrier_key(cls, barrier_index_key): """Converts a _BarrierIndex key to a _BarrierRecord key. Args: barrier_index_key: db.Key for a _BarrierIndex entity. Returns: db.Key for the corresponding _BarrierRecord entity. """ barrier_index_path = barrier_index_key.to_path() # Pick out the items from the _BarrierIndex key path that we need to # construct the _BarrierRecord key path. (pipeline_kind, dependent_pipeline_id, unused_kind, purpose) = barrier_index_path[-4:] barrier_record_path = ( pipeline_kind, dependent_pipeline_id, _BarrierRecord.kind(), purpose) return db.Key.from_path(*barrier_record_path)
python
def to_barrier_key(cls, barrier_index_key): """Converts a _BarrierIndex key to a _BarrierRecord key. Args: barrier_index_key: db.Key for a _BarrierIndex entity. Returns: db.Key for the corresponding _BarrierRecord entity. """ barrier_index_path = barrier_index_key.to_path() # Pick out the items from the _BarrierIndex key path that we need to # construct the _BarrierRecord key path. (pipeline_kind, dependent_pipeline_id, unused_kind, purpose) = barrier_index_path[-4:] barrier_record_path = ( pipeline_kind, dependent_pipeline_id, _BarrierRecord.kind(), purpose) return db.Key.from_path(*barrier_record_path)
Converts a _BarrierIndex key to a _BarrierRecord key.

Args:
  barrier_index_key: db.Key for a _BarrierIndex entity.

Returns:
  db.Key for the corresponding _BarrierRecord entity.
https://github.com/GoogleCloudPlatform/appengine-pipelines/blob/277394648dac3e8214677af898935d07399ac8e1/python/src/pipeline/models.py#L251-L271
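Because the index key path always ends with the dependent pipeline's kind and id, the index kind, and the purpose, the conversion is pure path slicing. An SDK-free sketch with tuples standing in for key paths and placeholder kind strings:

def to_barrier_path(barrier_index_path):
    # Last four elements: (pipeline kind, pipeline id, index kind, purpose);
    # swap the index kind for the barrier kind to get the barrier's path.
    pipeline_kind, pipeline_id, _index_kind, purpose = barrier_index_path[-4:]
    return (pipeline_kind, pipeline_id, '_BarrierRecord', purpose)

index_path = ('_SlotRecord', 'slot-1',
              '_PipelineRecord', 'pipe-9', '_BarrierIndex', 'finalize')
print(to_barrier_path(index_path))
# ('_PipelineRecord', 'pipe-9', '_BarrierRecord', 'finalize')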
bcbnz/pylabels
labels/sheet.py
Sheet.partial_page
def partial_page(self, page, used_labels): """Allows a page to be marked as already partially used so you can generate a PDF to print on the remaining labels. Parameters ---------- page: positive integer The page number to mark as partially used. The page must not have already been started, i.e., for page 1 this must be called before any labels have been started, for page 2 this must be called before the first page is full and so on. used_labels: iterable An iterable of (row, column) pairs marking which labels have been used already. The rows and columns must be within the bounds of the sheet. """ # Check the page number is valid. if page <= self.page_count: raise ValueError("Page {0:d} has already started, cannot mark used labels now.".format(page)) # Add these to any existing labels marked as used. used = self._used.get(page, set()) for row, column in used_labels: # Check the index is valid. if row < 1 or row > self.specs.rows: raise IndexError("Invalid row number: {0:d}.".format(row)) if column < 1 or column > self.specs.columns: raise IndexError("Invalid column number: {0:d}.".format(column)) # Add it. used.add((int(row), int(column))) # Save the details. self._used[page] = used
python
def partial_page(self, page, used_labels): """Allows a page to be marked as already partially used so you can generate a PDF to print on the remaining labels. Parameters ---------- page: positive integer The page number to mark as partially used. The page must not have already been started, i.e., for page 1 this must be called before any labels have been started, for page 2 this must be called before the first page is full and so on. used_labels: iterable An iterable of (row, column) pairs marking which labels have been used already. The rows and columns must be within the bounds of the sheet. """ # Check the page number is valid. if page <= self.page_count: raise ValueError("Page {0:d} has already started, cannot mark used labels now.".format(page)) # Add these to any existing labels marked as used. used = self._used.get(page, set()) for row, column in used_labels: # Check the index is valid. if row < 1 or row > self.specs.rows: raise IndexError("Invalid row number: {0:d}.".format(row)) if column < 1 or column > self.specs.columns: raise IndexError("Invalid column number: {0:d}.".format(column)) # Add it. used.add((int(row), int(column))) # Save the details. self._used[page] = used
Allows a page to be marked as already partially used so you can generate a
PDF to print on the remaining labels.

Parameters
----------
page: positive integer
    The page number to mark as partially used. The page must not have
    already been started, i.e., for page 1 this must be called before any
    labels have been started, for page 2 this must be called before the
    first page is full and so on.
used_labels: iterable
    An iterable of (row, column) pairs marking which labels have been used
    already. The rows and columns must be within the bounds of the sheet.
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L205-L239
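A hedged usage sketch: mark three labels on page 1 as already used before adding new labels. The Specification arguments and dimensions are arbitrary examples in the library's documented style, not a prescribed layout.

import labels

specs = labels.Specification(210, 297, 2, 8, 90, 25)  # A4 sheet, 2 x 8 labels, mm

def draw_label(label, width, height, obj):
    pass  # drawing callback, left empty for this sketch

sheet = labels.Sheet(specs, draw_label)
sheet.partial_page(1, [(1, 1), (1, 2), (2, 1)])  # first three positions used up
sheet.add_label('first new label')               # placed at row 2, column 2
sheet.save('partial.pdf')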
bcbnz/pylabels
labels/sheet.py
Sheet._new_page
def _new_page(self): """Helper function to start a new page. Not intended for external use. """ self._current_page = Drawing(*self._pagesize) if self._bgimage: self._current_page.add(self._bgimage) self._pages.append(self._current_page) self.page_count += 1 self._position = [1, 0]
python
def _new_page(self): """Helper function to start a new page. Not intended for external use. """ self._current_page = Drawing(*self._pagesize) if self._bgimage: self._current_page.add(self._bgimage) self._pages.append(self._current_page) self.page_count += 1 self._position = [1, 0]
Helper function to start a new page. Not intended for external use.
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L241-L250
bcbnz/pylabels
labels/sheet.py
Sheet._next_label
def _next_label(self): """Helper method to move to the next label. Not intended for external use. This does not increment the label_count attribute as the next label may not be usable (it may have been marked as missing through partial_page). See _next_unused_label for a generally more useful method. """ # Special case for the very first label. if self.page_count == 0: self._new_page() # Filled up a page. elif self._position == self._numlabels: self._new_page() # Filled up a row. elif self._position[1] == self.specs.columns: self._position[0] += 1 self._position[1] = 0 # Move to the next column. self._position[1] += 1
python
def _next_label(self): """Helper method to move to the next label. Not intended for external use. This does not increment the label_count attribute as the next label may not be usable (it may have been marked as missing through partial_page). See _next_unused_label for a generally more useful method. """ # Special case for the very first label. if self.page_count == 0: self._new_page() # Filled up a page. elif self._position == self._numlabels: self._new_page() # Filled up a row. elif self._position[1] == self.specs.columns: self._position[0] += 1 self._position[1] = 0 # Move to the next column. self._position[1] += 1
Helper method to move to the next label. Not intended for external use.

This does not increment the label_count attribute as the next label may not
be usable (it may have been marked as missing through partial_page). See
_next_unused_label for a generally more useful method.
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L252-L274
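The cursor arithmetic above can be read as: advance column-first, wrap to the next row when the row is full, and start a new page when the last position on the page was just used. A stand-alone sketch of the same rules:

def next_position(position, rows, columns):
    row, column = position
    if (row, column) == (rows, columns):  # page was full: first label of a new page
        return [1, 1]
    if column == columns:                 # row was full: drop to the next row
        return [row + 1, 1]
    return [row, column + 1]

pos = [1, 0]  # the sentinel _new_page() sets for a fresh page
for _ in range(5):
    pos = next_position(pos, rows=2, columns=3)
    print(pos)  # [1, 1] [1, 2] [1, 3] [2, 1] [2, 2]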
bcbnz/pylabels
labels/sheet.py
Sheet._next_unused_label
def _next_unused_label(self): """Helper method to move to the next unused label. Not intended for external use. This method will shade in any missing labels if desired, and will increment the label_count attribute once a suitable label position has been found. """ self._next_label() # This label may be missing. if self.page_count in self._used: # Keep trying while the label is missing. missing = self._used.get(self.page_count, set()) while tuple(self._position) in missing: # Throw the missing information away now we have used it. This # allows the _shade_remaining_missing method to work. missing.discard(tuple(self._position)) # Shade the missing label if desired. if self.shade_missing: self._shade_missing_label() # Try our luck with the next label. self._next_label() missing = self._used.get(self.page_count, set()) # Increment the count now we have found a suitable position. self.label_count += 1
python
def _next_unused_label(self): """Helper method to move to the next unused label. Not intended for external use. This method will shade in any missing labels if desired, and will increment the label_count attribute once a suitable label position has been found. """ self._next_label() # This label may be missing. if self.page_count in self._used: # Keep trying while the label is missing. missing = self._used.get(self.page_count, set()) while tuple(self._position) in missing: # Throw the missing information away now we have used it. This # allows the _shade_remaining_missing method to work. missing.discard(tuple(self._position)) # Shade the missing label if desired. if self.shade_missing: self._shade_missing_label() # Try our luck with the next label. self._next_label() missing = self._used.get(self.page_count, set()) # Increment the count now we have found a suitable position. self.label_count += 1
Helper method to move to the next unused label. Not intended for external use.

This method will shade in any missing labels if desired, and will increment
the label_count attribute once a suitable label position has been found.
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L276-L304
bcbnz/pylabels
labels/sheet.py
Sheet._calculate_edges
def _calculate_edges(self): """Calculate edges of the current label. Not intended for external use. """ # Calculate the left edge of the label. left = self.specs.left_margin left += (self.specs.label_width * (self._position[1] - 1)) if self.specs.column_gap: left += (self.specs.column_gap * (self._position[1] - 1)) left *= mm # And the bottom. bottom = self.specs.sheet_height - self.specs.top_margin bottom -= (self.specs.label_height * self._position[0]) if self.specs.row_gap: bottom -= (self.specs.row_gap * (self._position[0] - 1)) bottom *= mm # Done. return float(left), float(bottom)
python
def _calculate_edges(self): """Calculate edges of the current label. Not intended for external use. """ # Calculate the left edge of the label. left = self.specs.left_margin left += (self.specs.label_width * (self._position[1] - 1)) if self.specs.column_gap: left += (self.specs.column_gap * (self._position[1] - 1)) left *= mm # And the bottom. bottom = self.specs.sheet_height - self.specs.top_margin bottom -= (self.specs.label_height * self._position[0]) if self.specs.row_gap: bottom -= (self.specs.row_gap * (self._position[0] - 1)) bottom *= mm # Done. return float(left), float(bottom)
Calculate edges of the current label. Not intended for external use.
https://github.com/bcbnz/pylabels/blob/ecdb4ca48061d8f1dc0fcfe2d55ce2b89e0e5ec6/labels/sheet.py#L306-L326
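A worked example of the same geometry with made-up dimensions in millimetres: the left edge grows with the column index, while the bottom edge is measured down from the top of the sheet. The real method additionally multiplies by ReportLab's mm unit before returning.

def edges(specs, row, column):
    left = (specs['left_margin'] + specs['label_width'] * (column - 1)
            + specs['column_gap'] * (column - 1))
    bottom = (specs['sheet_height'] - specs['top_margin']
              - specs['label_height'] * row - specs['row_gap'] * (row - 1))
    return left, bottom

specs = dict(sheet_height=297, left_margin=10, top_margin=15,
             label_width=90, label_height=25, column_gap=5, row_gap=2)
print(edges(specs, row=1, column=2))  # (105, 257)
print(edges(specs, row=3, column=1))  # (10, 203)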