# Source file: flink-master/flink-python/pyflink/table/expression.py (repo: flink)
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from typing import Union, TypeVar, Generic, Any
from pyflink import add_version_doc
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, DataTypes, _to_java_data_type
from pyflink.util.java_utils import to_jarray
__all__ = [
'Expression',
'TimeIntervalUnit',
'TimePointUnit',
'JsonType',
'JsonExistsOnError',
'JsonValueOnEmptyOrError',
'JsonQueryWrapper',
'JsonQueryOnEmptyOrError'
]
_aggregation_doc = """
{op_desc}
Example:
::
>>> tab \\
>>> .group_by(col("a")) \\
>>> .select(col("a"),
>>> col("b").sum.alias("d"),
>>> col("b").sum0.alias("e"),
>>> col("b").min.alias("f"),
>>> col("b").max.alias("g"),
>>> col("b").count.alias("h"),
>>> col("b").avg.alias("i"),
>>> col("b").stddev_pop.alias("j"),
>>> col("b").stddev_samp.alias("k"),
>>> col("b").var_pop.alias("l"),
>>> col("b").var_samp.alias("m"),
>>> col("b").collect.alias("n"))
.. seealso:: :py:attr:`~Expression.sum`, :py:attr:`~Expression.sum0`, :py:attr:`~Expression.min`,
:py:attr:`~Expression.max`, :py:attr:`~Expression.count`, :py:attr:`~Expression.avg`,
:py:attr:`~Expression.stddev_pop`, :py:attr:`~Expression.stddev_samp`,
:py:attr:`~Expression.var_pop`, :py:attr:`~Expression.var_samp`,
:py:attr:`~Expression.collect`
"""
_math_log_doc = """
{op_desc}
.. seealso:: :py:attr:`~Expression.log10`, :py:attr:`~Expression.log2`, :py:attr:`~Expression.ln`,
:func:`~Expression.log`
"""
_math_trigonometric_doc = """
Calculates the {op_desc} of a given number.
.. seealso:: :py:attr:`~Expression.sin`, :py:attr:`~Expression.cos`, :py:attr:`~Expression.sinh`,
:py:attr:`~Expression.cosh`, :py:attr:`~Expression.tan`, :py:attr:`~Expression.cot`,
:py:attr:`~Expression.asin`, :py:attr:`~Expression.acos`, :py:attr:`~Expression.atan`,
:py:attr:`~Expression.tanh`
"""
_string_doc_seealso = """
.. seealso:: :func:`~Expression.trim_leading`, :func:`~Expression.trim_trailing`,
:func:`~Expression.trim`, :func:`~Expression.replace`,
:py:attr:`~Expression.char_length`, :py:attr:`~Expression.upper_case`,
:py:attr:`~Expression.lower_case`, :py:attr:`~Expression.init_cap`,
:func:`~Expression.like`, :func:`~Expression.similar`,
:func:`~Expression.position`, :func:`~Expression.lpad`, :func:`~Expression.rpad`,
:func:`~Expression.overlay`, :func:`~Expression.regexp_replace`,
:func:`~Expression.regexp_extract`, :func:`~Expression.substring`,
:py:attr:`~Expression.from_base64`, :py:attr:`~Expression.to_base64`,
:py:attr:`~Expression.ltrim`, :py:attr:`~Expression.rtrim`, :func:`~Expression.repeat`
"""
_temporal_doc_seealso = """
.. seealso:: :py:attr:`~Expression.to_date`, :py:attr:`~Expression.to_time`,
:py:attr:`~Expression.to_timestamp`, :func:`~Expression.extract`,
:func:`~Expression.floor`, :func:`~Expression.ceil`
"""
_time_doc = """
Creates an interval of the given number of {op_desc}.
The produced expression is of type :func:`~DataTypes.INTERVAL`.
.. seealso:: :py:attr:`~Expression.year`, :py:attr:`~Expression.years`,
:py:attr:`~Expression.quarter`, :py:attr:`~Expression.quarters`,
:py:attr:`~Expression.month`, :py:attr:`~Expression.months`,
:py:attr:`~Expression.week`, :py:attr:`~Expression.weeks`, :py:attr:`~Expression.day`,
:py:attr:`~Expression.days`, :py:attr:`~Expression.hour`, :py:attr:`~Expression.hours`,
:py:attr:`~Expression.minute`, :py:attr:`~Expression.minutes`,
:py:attr:`~Expression.second`, :py:attr:`~Expression.seconds`,
:py:attr:`~Expression.milli`, :py:attr:`~Expression.millis`
"""
_hash_doc = """
Returns the {op_desc} hash of the string argument; null if string is null.
:return: string of {bit} hexadecimal digits or null.
.. seealso:: :py:attr:`~Expression.md5`, :py:attr:`~Expression.sha1`, :py:attr:`~Expression.sha224`,
:py:attr:`~Expression.sha256`, :py:attr:`~Expression.sha384`,
:py:attr:`~Expression.sha512`, :py:attr:`~Expression.sha2`
"""
def _make_math_log_doc():
math_log_funcs = {
Expression.log10: "Calculates the base 10 logarithm of the given value.",
Expression.log2: "Calculates the base 2 logarithm of the given value.",
Expression.ln: "Calculates the natural logarithm of the given value.",
Expression.log: "Calculates the natural logarithm of the given value if base is not "
"specified. Otherwise, calculates the logarithm of the given value to the "
"given base.",
}
for func, op_desc in math_log_funcs.items():
func.__doc__ = _math_log_doc.format(op_desc=op_desc)
def _make_math_trigonometric_doc():
math_trigonometric_funcs = {
Expression.cosh: "hyperbolic cosine",
Expression.sinh: "hyperbolic sine",
Expression.sin: "sine",
Expression.cos: "cosine",
Expression.tan: "tangent",
Expression.cot: "cotangent",
Expression.asin: "arc sine",
Expression.acos: "arc cosine",
Expression.atan: "arc tangent",
Expression.tanh: "hyperbolic tangent",
}
for func, op_desc in math_trigonometric_funcs.items():
func.__doc__ = _math_trigonometric_doc.format(op_desc=op_desc)
def _make_aggregation_doc():
aggregation_funcs = {
Expression.sum: "Returns the sum of the numeric field across all input values. "
"If all values are null, null is returned.",
Expression.sum0: "Returns the sum of the numeric field across all input values. "
"If all values are null, 0 is returned.",
Expression.min: "Returns the minimum value of field across all input values.",
Expression.max: "Returns the maximum value of field across all input values.",
Expression.count: "Returns the number of input rows for which the field is not null.",
Expression.avg: "Returns the average (arithmetic mean) of the numeric field across all "
"input values.",
Expression.stddev_pop: "Returns the population standard deviation of an expression (the "
"square root of var_pop).",
Expression.stddev_samp: "Returns the sample standard deviation of an expression (the square "
"root of var_samp).",
Expression.var_pop: "Returns the population standard variance of an expression.",
Expression.var_samp: "Returns the sample variance of a given expression.",
Expression.collect: "Returns multiset aggregate of a given expression.",
}
for func, op_desc in aggregation_funcs.items():
func.__doc__ = _aggregation_doc.format(op_desc=op_desc)
def _make_string_doc():
string_funcs = [
Expression.substring, Expression.trim_leading, Expression.trim_trailing, Expression.trim,
Expression.replace, Expression.char_length, Expression.upper_case, Expression.lower_case,
Expression.init_cap, Expression.like, Expression.similar, Expression.position,
Expression.lpad, Expression.rpad, Expression.overlay, Expression.regexp_replace,
Expression.regexp_extract, Expression.from_base64, Expression.to_base64,
Expression.ltrim, Expression.rtrim, Expression.repeat
]
for func in string_funcs:
func.__doc__ = func.__doc__.replace('    ', '') + _string_doc_seealso
def _make_temporal_doc():
temporal_funcs = [
Expression.to_date, Expression.to_time, Expression.to_timestamp, Expression.extract,
Expression.floor, Expression.ceil
]
for func in temporal_funcs:
func.__doc__ = func.__doc__.replace('    ', '') + _temporal_doc_seealso
def _make_time_doc():
time_funcs = {
Expression.year: "years",
Expression.years: "years",
Expression.quarter: "quarters",
Expression.quarters: "quarters",
Expression.month: "months",
Expression.months: "months",
Expression.week: "weeks",
Expression.weeks: "weeks",
Expression.day: "days",
Expression.days: "days",
Expression.hour: "hours",
Expression.hours: "hours",
Expression.minute: "minutes",
Expression.minutes: "minutes",
Expression.second: "seconds",
Expression.seconds: "seconds",
Expression.milli: "millis",
Expression.millis: "millis"
}
for func, op_desc in time_funcs.items():
func.__doc__ = _time_doc.format(op_desc=op_desc)
def _make_hash_doc():
hash_funcs = {
Expression.md5: ("MD5", 32),
Expression.sha1: ("SHA-1", 40),
Expression.sha224: ("SHA-224", 56),
Expression.sha256: ("SHA-256", 64),
Expression.sha384: ("SHA-384", 96),
Expression.sha512: ("SHA-512", 128)
}
for func, (op_desc, bit) in hash_funcs.items():
func.__doc__ = _hash_doc.format(op_desc=op_desc, bit=bit)
def _add_version_doc():
for func_name in dir(Expression):
if not func_name.startswith("_"):
add_version_doc(getattr(Expression, func_name), "1.12.0")
def _get_java_expression(expr, to_expr: bool = False):
"""
Returns the Java expression for the given expr. If expr is a Python expression, returns the
underlying Java expression, otherwise, convert it to a Java expression if to_expr is true.
"""
if isinstance(expr, Expression):
return expr._j_expr
elif to_expr:
gateway = get_gateway()
return gateway.jvm.Expressions.lit(expr)
else:
return expr
def _unary_op(op_name: str):
def _(self) -> 'Expression':
return Expression(getattr(self._j_expr, op_name)())
return _
def _binary_op(op_name: str, reverse: bool = False):
def _(self, other) -> 'Expression':
if reverse:
return Expression(getattr(_get_java_expression(other, True), op_name)(self._j_expr))
else:
return Expression(getattr(self._j_expr, op_name)(_get_java_expression(other)))
return _
def _ternary_op(op_name: str):
def _(self, first, second) -> 'Expression':
return Expression(getattr(self._j_expr, op_name)(
_get_java_expression(first), _get_java_expression(second)))
return _
def _varargs_op(op_name: str):
def _(self, *args) -> 'Expression':
return Expression(
getattr(self._j_expr, op_name)(*[_get_java_expression(arg) for arg in args]))
return _
def _expressions_op(op_name: str):
def _(self, *args) -> 'Expression':
from pyflink.table import expressions
return getattr(expressions, op_name)(self, *[_get_java_expression(arg) for arg in args])
return _
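# Editor's note (sketch, not part of the original source): the factories above are the whole
# Python-to-Java binding layer. Each produced function looks up the named method on the
# wrapped Java expression object via Py4J and re-wraps the result, so for example
# (assuming a running gateway and `col` imported from pyflink.table.expressions):
#
#   >>> col("a").abs            # _unary_op("abs"):   calls j_expr.abs()
#   >>> col("a") + 1            # _binary_op("plus"): calls j_expr.plus(1)
#   >>> col("a").between(1, 9)  # _ternary_op("between"): j_expr.between(1, 9)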
class TimeIntervalUnit(Enum):
"""
Units for working with time intervals.
.. versionadded:: 1.12.0
"""
YEAR = 0,
YEAR_TO_MONTH = 1,
QUARTER = 2,
MONTH = 3,
WEEK = 4,
DAY = 5,
DAY_TO_HOUR = 6,
DAY_TO_MINUTE = 7,
DAY_TO_SECOND = 8,
HOUR = 9,
SECOND = 10,
HOUR_TO_MINUTE = 11,
HOUR_TO_SECOND = 12,
MINUTE = 13,
MINUTE_TO_SECOND = 14
def _to_j_time_interval_unit(self):
gateway = get_gateway()
JTimeIntervalUnit = gateway.jvm.org.apache.flink.table.expressions.TimeIntervalUnit
return getattr(JTimeIntervalUnit, self.name)
class TimePointUnit(Enum):
"""
Units for working with points in time.
.. versionadded:: 1.12.0
"""
YEAR = 0,
MONTH = 1,
DAY = 2,
HOUR = 3,
MINUTE = 4,
SECOND = 5,
QUARTER = 6,
WEEK = 7,
MILLISECOND = 8,
MICROSECOND = 9
def _to_j_time_point_unit(self):
gateway = get_gateway()
JTimePointUnit = gateway.jvm.org.apache.flink.table.expressions.TimePointUnit
return getattr(JTimePointUnit, self.name)
class JsonType(Enum):
"""
Types of JSON objects for is_json().
"""
VALUE = 0,
SCALAR = 1,
ARRAY = 2,
OBJECT = 3
def _to_j_json_type(self):
gateway = get_gateway()
JJsonType = gateway.jvm.org.apache.flink.table.api.JsonType
return getattr(JJsonType, self.name)
class JsonExistsOnError(Enum):
"""
Behavior in case of errors for json_exists().
"""
TRUE = 0,
FALSE = 1,
UNKNOWN = 2,
ERROR = 3
def _to_j_json_exists_on_error(self):
gateway = get_gateway()
JJsonExistsOnError = gateway.jvm.org.apache.flink.table.api.JsonExistsOnError
return getattr(JJsonExistsOnError, self.name)
class JsonValueOnEmptyOrError(Enum):
"""
Behavior in case of emptiness or errors for json_value().
"""
NULL = 0,
ERROR = 1,
DEFAULT = 2
def _to_j_json_value_on_empty_or_error(self):
gateway = get_gateway()
JJsonValueOnEmptyOrError = gateway.jvm.org.apache.flink.table.api.JsonValueOnEmptyOrError
return getattr(JJsonValueOnEmptyOrError, self.name)
class JsonQueryWrapper(Enum):
"""
Defines whether and when to wrap the result of json_query() into an array.
"""
WITHOUT_ARRAY = 0,
CONDITIONAL_ARRAY = 1,
UNCONDITIONAL_ARRAY = 2
def _to_j_json_query_wrapper(self):
gateway = get_gateway()
JJsonQueryWrapper = gateway.jvm.org.apache.flink.table.api.JsonQueryWrapper
return getattr(JJsonQueryWrapper, self.name)
class JsonQueryOnEmptyOrError(Enum):
"""
Defines the behavior of json_query() in case of emptiness or errors.
"""
NULL = 0,
EMPTY_ARRAY = 1,
EMPTY_OBJECT = 2,
ERROR = 3
def _to_j_json_query_on_error_or_empty(self):
gateway = get_gateway()
JJsonQueryOnEmptyOrError = gateway.jvm.org.apache.flink.table.api.JsonQueryOnEmptyOrError
return getattr(JJsonQueryOnEmptyOrError, self.name)
class JsonOnNull(Enum):
"""
Behavior for entries with a null value for json_object().
"""
NULL = 0,
ABSENT = 1
def _to_j_json_on_null(self):
gateway = get_gateway()
JJsonOnNull = gateway.jvm.org.apache.flink.table.api.JsonOnNull
return getattr(JJsonOnNull, self.name)
T = TypeVar('T')
class Expression(Generic[T]):
"""
Expressions represent a logical tree for producing a computation result.
Expressions might be literal values, function calls, or field references.
.. versionadded:: 1.12.0
"""
def __init__(self, j_expr_or_property_name):
self._j_expr_or_property_name = j_expr_or_property_name
__abs__ = _unary_op("abs")
# comparison functions
__eq__ = _binary_op("isEqual")
__ne__ = _binary_op("isNotEqual")
__lt__ = _binary_op("isLess")
__gt__ = _binary_op("isGreater")
__le__ = _binary_op("isLessOrEqual")
__ge__ = _binary_op("isGreaterOrEqual")
# logic functions
__and__ = _binary_op("and")
__or__ = _binary_op("or")
__invert__ = _unary_op('isNotTrue')
__rand__ = _binary_op("and")
__ror__ = _binary_op("or")
# arithmetic functions
__add__ = _binary_op("plus")
__sub__ = _binary_op("minus")
__mul__ = _binary_op("times")
__truediv__ = _binary_op("dividedBy")
__mod__ = _binary_op("mod")
__pow__ = _binary_op("power")
__neg__ = _expressions_op("negative")
__radd__ = _binary_op("plus", True)
__rsub__ = _binary_op("minus", True)
__rmul__ = _binary_op("times")
__rtruediv__ = _binary_op("dividedBy", True)
__rmod__ = _binary_op("mod", True)
__rpow__ = _binary_op("power", True)
def __str__(self):
return self._j_expr.asSummaryString()
def __getattr__(self, name):
if name == '_j_expr':
if isinstance(self._j_expr_or_property_name, str):
gateway = get_gateway()
return getattr(gateway.jvm.Expressions, self._j_expr_or_property_name)
else:
return self._j_expr_or_property_name
return self.get(name)
def __getitem__(self, index):
return self.at(index)
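# Editor's sketch (not in the original source): the dunder methods above let ordinary Python
# operators build Java expression trees. Assuming `col` from pyflink.table.expressions and a
# running gateway:
#
#   >>> expr = (col("a") + col("b")) * 2           # plus(...) then times(2)
#   >>> cond = (col("a") > 0) & (col("b") <= 10)   # isGreater / isLessOrEqual / and
#   >>> print(cond)                                # __str__ delegates to asSummaryString()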
# ---------------------------- arithmetic functions ----------------------------------
@property
def exp(self) -> 'Expression[float]':
"""
Calculates Euler's number raised to the given power.
"""
return _unary_op("exp")(self)
@property
def log10(self) -> 'Expression[float]':
return _unary_op("log10")(self)
@property
def log2(self) -> 'Expression[float]':
return _unary_op("log2")(self)
@property
def ln(self) -> 'Expression[float]':
return _unary_op("ln")(self)
def log(self, base=None) -> 'Expression[float]':
if base is None:
return _unary_op("log")(self)
else:
return _binary_op("log")(self, base)
@property
def cosh(self) -> 'Expression[float]':
return _unary_op("cosh")(self)
@property
def sinh(self) -> 'Expression[float]':
return _unary_op("sinh")(self)
@property
def sin(self) -> 'Expression[float]':
return _unary_op("sin")(self)
@property
def cos(self) -> 'Expression[float]':
return _unary_op("cos")(self)
@property
def tan(self) -> 'Expression[float]':
return _unary_op("tan")(self)
@property
def cot(self) -> 'Expression[float]':
return _unary_op("cot")(self)
@property
def asin(self) -> 'Expression[float]':
return _unary_op("asin")(self)
@property
def acos(self) -> 'Expression[float]':
return _unary_op("acos")(self)
@property
def atan(self) -> 'Expression[float]':
return _unary_op("atan")(self)
@property
def tanh(self) -> 'Expression[float]':
return _unary_op("tanh")(self)
@property
def degrees(self) -> 'Expression[float]':
"""
Converts numeric from radians to degrees.
.. seealso:: :py:attr:`~Expression.radians`
"""
return _unary_op("degrees")(self)
@property
def radians(self) -> 'Expression[float]':
"""
Converts numeric from degrees to radians.
.. seealso:: :py:attr:`~Expression.degrees`
"""
return _unary_op("radians")(self)
@property
def sqrt(self) -> 'Expression[float]':
"""
Calculates the square root of a given value.
"""
return _unary_op("sqrt")(self)
@property
def abs(self) -> 'Expression[T]':
"""
Calculates the absolute value of a given value.
"""
return _unary_op("abs")(self)
@property
def sign(self) -> 'Expression[T]':
"""
Calculates the signum of a given number.
e.g. `lit(1.23).sign` leads to `1.00`, `lit(-1.23).sign` leads to `-1.00`.
"""
return _unary_op("sign")(self)
def round(self, places: Union[int, 'Expression[int]']):
"""
Rounds the given number to `places` digits to the right of the decimal point.
e.g. `lit(646.646).round(2)` leads to `646.65`, `lit(646.646).round(3)` leads to `646.646`,
`lit(646.646).round(0)` leads to `647`, `lit(646.646).round(-2)` leads to `600`.
"""
return _binary_op("round")(self, places)
def between(self, lower_bound, upper_bound) -> 'Expression[bool]':
"""
Returns true if the given expression is between lower_bound and upper_bound
(both inclusive). False otherwise. The parameters must be numeric types or identical
comparable types.
e.g. `lit(2.1).between(2.1, 2.1)` leads to `true`,
`lit("2018-05-05").to_date.between(lit("2018-05-01").to_date, lit("2018-05-10").to_date)`
leads to `true`.
:param lower_bound: numeric or comparable expression
:param upper_bound: numeric or comparable expression
.. seealso:: :func:`~Expression.not_between`
"""
return _ternary_op("between")(self, lower_bound, upper_bound)
def not_between(self, lower_bound, upper_bound) -> 'Expression[bool]':
"""
Returns true if the given expression is not between lower_bound and upper_bound
(both inclusive). False otherwise. The parameters must be numeric types or identical
comparable types.
e.g. `lit(2.1).not_between(2.1, 2.1)` leads to `false`,
`lit("2018-05-05").to_date.not_between(lit("2018-05-01").to_date,
lit("2018-05-10").to_date)` leads to `false`.
:param lower_bound: numeric or comparable expression
:param upper_bound: numeric or comparable expression
.. seealso:: :func:`~Expression.between`
"""
return _ternary_op("notBetween")(self, lower_bound, upper_bound)
def then(self, if_true, if_false) -> 'Expression':
"""
Ternary conditional operator that decides which of two other expressions should be evaluated
based on an evaluated boolean condition.
e.g. lit(42).is_greater(5).then("A", "B") leads to "A"
:param if_true: expression to be evaluated if condition holds
:param if_false: expression to be evaluated if condition does not hold
"""
return _ternary_op("then")(self, if_true, if_false)
def if_null(self, null_replacement) -> 'Expression':
"""
Returns null_replacement if the given expression is null; otherwise the expression is
returned.
This function returns a data type that is very specific in terms of nullability. The
returned type is the common type of both arguments but only nullable if the
null_replacement is nullable.
The function allows passing nullable columns into a function or table that is declared
with a NOT NULL constraint.
e.g. col("nullable_column").if_null(5) never returns null.
"""
return _binary_op("ifNull")(self, null_replacement)
@property
def is_null(self) -> 'Expression[bool]':
"""
Returns true if the given expression is null.
.. seealso:: :py:attr:`~Expression.is_not_null`
"""
return _unary_op("isNull")(self)
@property
def is_not_null(self) -> 'Expression[bool]':
"""
Returns true if the given expression is not null.
.. seealso:: :py:attr:`~Expression.is_null`
"""
return _unary_op("isNotNull")(self)
@property
def is_true(self) -> 'Expression[bool]':
"""
Returns true if given boolean expression is true. False otherwise (for null and false).
.. seealso:: :py:attr:`~Expression.is_false`, :py:attr:`~Expression.is_not_true`,
:py:attr:`~Expression.is_not_false`
"""
return _unary_op("isTrue")(self)
@property
def is_false(self) -> 'Expression[bool]':
"""
Returns true if given boolean expression is false. False otherwise (for null and true).
.. seealso:: :py:attr:`~Expression.is_true`, :py:attr:`~Expression.is_not_true`,
:py:attr:`~Expression.is_not_false`
"""
return _unary_op("isFalse")(self)
@property
def is_not_true(self) -> 'Expression[bool]':
"""
Returns true if given boolean expression is not true (for null and false). False otherwise.
.. seealso:: :py:attr:`~Expression.is_true`, :py:attr:`~Expression.is_false`,
:py:attr:`~Expression.is_not_false`
"""
return _unary_op("isNotTrue")(self)
@property
def is_not_false(self) -> 'Expression[bool]':
"""
Returns true if given boolean expression is not false (for null and true). False otherwise.
.. seealso:: :py:attr:`~Expression.is_true`, :py:attr:`~Expression.is_false`,
:py:attr:`~Expression.is_not_true`
"""
return _unary_op("isNotFalse")(self)
@property
def distinct(self) -> 'Expression':
"""
Similar to a SQL distinct aggregation clause such as COUNT(DISTINCT a), declares that an
aggregation function is only applied on distinct input values.
Example:
::
>>> tab \\
>>> .group_by(col("a")) \\
>>> .select(col("a"), col("b").sum.distinct.alias("d"))
"""
return _unary_op("distinct")(self)
@property
def sum(self) -> 'Expression':
return _unary_op("sum")(self)
@property
def sum0(self) -> 'Expression':
return _unary_op("sum0")(self)
@property
def min(self) -> 'Expression':
return _unary_op("min")(self)
@property
def max(self) -> 'Expression':
return _unary_op("max")(self)
@property
def count(self) -> 'Expression':
return _unary_op("count")(self)
@property
def avg(self) -> 'Expression':
return _unary_op("avg")(self)
@property
def first_value(self) -> 'Expression':
"""
Returns the first value of field across all input values.
"""
return _unary_op("firstValue")(self)
@property
def last_value(self) -> 'Expression':
"""
Returns the last value of field across all input values.
"""
return _unary_op("lastValue")(self)
def list_agg(self, separator: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Concatenates the values of string expressions and places separator values between them.
The separator is not added at the end of string. The default value of separator is ‘,’.
"""
if separator is None:
return _unary_op("listAgg")(self)
else:
return _binary_op("listAgg")(self, separator)
@property
def stddev_pop(self) -> 'Expression':
return _unary_op("stddevPop")(self)
@property
def stddev_samp(self) -> 'Expression':
return _unary_op("stddevSamp")(self)
@property
def var_pop(self) -> 'Expression':
return _unary_op("varPop")(self)
@property
def var_samp(self) -> 'Expression':
return _unary_op("varSamp")(self)
@property
def collect(self) -> 'Expression':
return _unary_op("collect")(self)
def alias(self, name: str, *extra_names: str) -> 'Expression[T]':
"""
Specifies a name for an expression i.e. a field.
Example:
::
>>> tab.select(col('a').alias('b'))
:param name: name for one field.
:param extra_names: additional names if the expression expands to multiple fields
"""
gateway = get_gateway()
return _ternary_op("as")(self, name, to_jarray(gateway.jvm.String, extra_names))
def cast(self, data_type: DataType) -> 'Expression':
"""
Returns a new value cast to the given data type.
A cast error throws an exception and fails the job.
When performing a cast operation that may fail, like STRING to INT,
one should rather use try_cast, in order to handle errors.
If "table.exec.legacy-cast-behaviour" is enabled, cast behaves like try_cast.
E.g. lit("4").cast(DataTypes.INT()) returns 42;
lit(null).cast(DataTypes.STRING()) returns NULL of type STRING;
lit("non-number").cast(DataTypes.INT()) throws an exception and fails the job.
"""
return _binary_op("cast")(self, _to_java_data_type(data_type))
def try_cast(self, data_type: DataType) -> 'Expression':
"""
Like cast, but in case of error, returns NULL rather than failing the job.
E.g. lit("42").try_cast(DataTypes.INT()) returns 42;
lit(null).try_cast(DataTypes.STRING()) returns NULL of type STRING;
lit("non-number").cast(DataTypes.INT()) returns NULL of type INT.
coalesce(lit("non-number").cast(DataTypes.INT()), lit(0)) returns 0 of type INT.
"""
return _binary_op("tryCast")(self, _to_java_data_type(data_type))
@property
def asc(self) -> 'Expression':
"""
Specifies ascending order of an expression i.e. a field for order_by.
Example:
::
>>> tab.order_by(col('a').asc)
.. seealso:: :py:attr:`~Expression.desc`
"""
return _unary_op("asc")(self)
@property
def desc(self) -> 'Expression':
"""
Specifies descending order of an expression i.e. a field for order_by.
Example:
::
>>> tab.order_by(col('a').desc)
.. seealso:: :py:attr:`~Expression.asc`
"""
return _unary_op("desc")(self)
def in_(self, first_element_or_table, *remaining_elements) -> 'Expression':
"""
If first_element_or_table is a Table, returns true if an expression exists in a given table
sub-query. The sub-query table must consist of one column. This column must have the same
data type as the expression.
.. note::
This operation is not supported in a streaming environment yet if
first_element_or_table is a Table.
Otherwise, returns true if an expression exists in a given list of expressions. This is a
shorthand for multiple OR conditions.
If the testing set contains null, the result will be null if the element can not be found
and true if it can be found. If the element is null, the result is always null.
e.g. lit("42").in(1, 2, 3) leads to false.
Example:
::
>>> tab.where(col("a").in_(1, 2, 3))
>>> table_a.where(col("x").in_(table_b.select(col("y"))))
"""
from pyflink.table import Table
if isinstance(first_element_or_table, Table):
assert len(remaining_elements) == 0
return _binary_op("in")(self, first_element_or_table._j_table)
else:
gateway = get_gateway()
ApiExpressionUtils = gateway.jvm.org.apache.flink.table.expressions.ApiExpressionUtils
remaining_elements = (first_element_or_table, *remaining_elements)
exprs = [ApiExpressionUtils.objectToExpression(_get_java_expression(e))
for e in remaining_elements]
return _binary_op("in")(self, to_jarray(gateway.jvm.Object, exprs))
@property
def start(self) -> 'Expression':
"""
Returns the start time (inclusive) of a window when applied on a window reference.
Example:
::
>>> tab.window(Tumble
>>> .over(row_interval(2))
>>> .on(col("a"))
>>> .alias("w")) \\
>>> .group_by(col("c"), col("w")) \\
>>> .select(col("c"), col("w").start, col("w").end, col("w").proctime)
.. seealso:: :py:attr:`~Expression.end`
"""
return _unary_op("start")(self)
@property
def end(self) -> 'Expression':
"""
Returns the end time (exclusive) of a window when applied on a window reference.
e.g. if a window ends at 10:59:59.999 this property will return 11:00:00.000.
Example:
::
>>> orders.window(Tumble
>>> .over(row_interval(2))
>>> .on(col("a"))
>>> .alias("w")) \\
>>> .group_by(col("c"), col("w")) \\
>>> .select(col("c"), col("w").start, col("w").end, col("w").proctime)
.. seealso:: :py:attr:`~Expression.start`
"""
return _unary_op("end")(self)
@property
def bin(self) -> 'Expression[str]':
"""
Returns a string representation of an integer numeric value in binary format. Returns null
if numeric is null. E.g. "4" leads to "100", "12" leads to "1100".
.. seealso:: :py:attr:`~Expression.hex`
"""
return _unary_op("bin")(self)
@property
def hex(self) -> 'Expression[str]':
"""
Returns a string representation of an integer numeric value or a string in hex format.
Returns null if numeric or string is null.
E.g. a numeric 20 leads to "14", a numeric 100 leads to "64", and a string "hello,world"
leads to "68656c6c6f2c776f726c64".
.. seealso:: :py:attr:`~Expression.bin`
"""
return _unary_op("hex")(self)
def truncate(self, n: Union[int, 'Expression[int]'] = 0) -> 'Expression[T]':
"""
Returns the number truncated to n decimal places.
If n is 0, the result has no decimal point or fractional part.
n can be negative to cause n digits left of the decimal point of the value to become zero.
E.g. lit(42.345).truncate(2) leads to 42.34 and lit(42).truncate(-1) leads to 40.
"""
return _binary_op("truncate")(self, n)
# ---------------------------- string functions ----------------------------------
def substring(self,
begin_index: Union[int, 'Expression[int]'],
length: Union[int, 'Expression[int]'] = None) -> 'Expression[str]':
"""
Creates a substring of the given string at given index for a given length.
:param begin_index: first character of the substring (starting at 1, inclusive)
:param length: number of characters of the substring
"""
if length is None:
return _binary_op("substring")(self, begin_index)
else:
return _ternary_op("substring")(self, begin_index, length)
def substr(self,
begin_index: Union[int, 'Expression[int]'],
length: Union[int, 'Expression[int]'] = None) -> 'Expression[str]':
"""
Creates a substring of the given string at given index for a given length.
:param begin_index: first character of the substring (starting at 1, inclusive)
:param length: number of characters of the substring
"""
if length is None:
return _binary_op("substr")(self, begin_index)
else:
return _ternary_op("substr")(self, begin_index, length)
def trim_leading(self, character: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Removes leading space characters from the given string if character is None.
Otherwise, removes leading specified characters from the given string.
"""
if character is None:
return _unary_op("trimLeading")(self)
else:
return _binary_op("trimLeading")(self, character)
def trim_trailing(self, character: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Removes trailing space characters from the given string if character is None.
Otherwise, removes trailing specified characters from the given string.
"""
if character is None:
return _unary_op("trimTrailing")(self)
else:
return _binary_op("trimTrailing")(self, character)
def trim(self, character: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Removes leading and trailing space characters from the given string if character
is None. Otherwise, removes leading and trailing specified characters from the given string.
"""
if character is None:
return _unary_op("trim")(self)
else:
return _binary_op("trim")(self, character)
def replace(self,
search: Union[str, 'Expression[str]'] = None,
replacement: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Returns a new string which replaces all the occurrences of the search target
with the replacement string (non-overlapping).
e.g. `lit('This is a test String.').replace(' ', '_')` leads to `This_is_a_test_String.`
"""
return _ternary_op("replace")(self, search, replacement)
@property
def char_length(self) -> 'Expression[int]':
"""
Returns the length of a string.
"""
return _unary_op("charLength")(self)
@property
def upper_case(self) -> 'Expression[str]':
"""
Returns all of the characters in a string in upper case using the rules of the default
locale.
"""
return _unary_op("upperCase")(self)
@property
def lower_case(self) -> 'Expression[str]':
"""
Returns all of the characters in a string in lower case using the rules of the default
locale.
"""
return _unary_op("lowerCase")(self)
@property
def init_cap(self) -> 'Expression[str]':
"""
Converts the initial letter of each word in a string to uppercase. Assumes a
string containing only [A-Za-z0-9], everything else is treated as whitespace.
"""
return _unary_op("initCap")(self)
def like(self, pattern: Union[str, 'Expression[str]'] = None) -> 'Expression[bool]':
"""
Returns true, if a string matches the specified LIKE pattern.
e.g. 'Jo_n%' matches all strings that start with 'Jo(arbitrary letter)n'
"""
return _binary_op("like")(self, pattern)
def similar(self, pattern: Union[str, 'Expression[str]'] = None) -> 'Expression[bool]':
"""
Returns true, if a string matches the specified SQL regex pattern.
e.g. 'A+' matches all strings that consist of at least one A
"""
return _binary_op("similar")(self, pattern)
def position(self, haystack: Union[str, 'Expression[str]'] = None) -> 'Expression[int]':
"""
Returns the position of a string within another string, starting at 1.
Returns 0 if the string cannot be found. e.g. lit('a').position('bbbbba') leads to 6.
"""
return _binary_op("position")(self, haystack)
def lpad(self,
length: Union[int, 'Expression[int]'],
pad: Union[str, 'Expression[str]']) -> 'Expression[str]':
"""
Returns a string left-padded with the given pad string to a length of `length` characters.
If the string is longer than `length`, the return value is shortened to `length` characters.
e.g. lit('hi').lpad(4, '??') returns '??hi', lit('hi').lpad(1, '??') returns 'h'
"""
return _ternary_op("lpad")(self, length, pad)
def rpad(self,
length: Union[int, 'Expression[int]'],
pad: Union[str, 'Expression[str]']) -> 'Expression[str]':
"""
Returns a string right-padded with the given pad string to a length of `length` characters.
If the string is longer than `length`, the return value is shortened to `length` characters.
e.g. lit('hi').rpad(4, '??') returns 'hi??', lit('hi').rpad(1, '??') returns 'h'
"""
return _ternary_op("rpad")(self, length, pad)
def overlay(self,
new_string: Union[str, 'Expression[str]'],
starting: Union[int, 'Expression[int]'],
length: Union[int, 'Expression[int]'] = None) -> 'Expression[str]':
"""
Replaces a substring of string with a string starting at a position
(starting at 1). e.g. lit('xxxxxtest').overlay('xxxx', 6) leads to 'xxxxxxxxx'
lit('xxxxxtest').overlay('xxxx', 6, 2) leads to 'xxxxxxxxxst'
"""
if length is None:
return _ternary_op("overlay")(self, new_string, starting)
else:
j_expr_new_string = new_string._j_expr \
if isinstance(new_string, Expression) else new_string
j_expr_starting = starting._j_expr \
if isinstance(starting, Expression) else starting
j_expr_length = length._j_expr \
if isinstance(length, Expression) else length
return Expression(getattr(self._j_expr, "overlay")(
j_expr_new_string, j_expr_starting, j_expr_length))
def regexp(self, regex: Union[str, 'Expression[str]']) -> 'Expression[str]':
"""
Returns True if any (possibly empty) substring matches the regular expression,
otherwise False. Returns None if any of the arguments is None.
"""
return _binary_op("regexp")(self, regex)
def regexp_replace(self,
regex: Union[str, 'Expression[str]'],
replacement: Union[str, 'Expression[str]']) -> 'Expression[str]':
"""
Returns a string with all substrings that match the regular expression
consecutively being replaced.
"""
return _ternary_op("regexpReplace")(self, regex, replacement)
def regexp_extract(self,
regex: Union[str, 'Expression[str]'],
extract_index: Union[int, 'Expression[int]'] = None) -> 'Expression[str]':
"""
Returns a string extracted with a specified regular expression and a regex match
group index.
"""
if extract_index is None:
return _ternary_op("regexpExtract")(self, regex)
else:
return _ternary_op("regexpExtract")(self, regex, extract_index)
@property
def from_base64(self) -> 'Expression[str]':
"""
Returns the base string decoded with base64.
"""
return _unary_op("fromBase64")(self)
@property
def to_base64(self) -> 'Expression[str]':
"""
Returns the base64-encoded result of the input string.
"""
return _unary_op("toBase64")(self)
@property
def ascii(self) -> 'Expression[int]':
"""
Returns the numeric value of the first character of the input string.
"""
return _unary_op("ascii")(self)
@property
def chr(self) -> 'Expression[str]':
"""
Returns the ASCII character result of the input integer.
"""
return _unary_op("chr")(self)
def decode(self, charset: Union[str, 'Expression[str]']) -> 'Expression[str]':
"""
Decodes the first argument into a String using the provided character set.
"""
return _binary_op("decode")(self, charset)
def encode(self, charset: Union[str, 'Expression[str]']) -> 'Expression[bytes]':
"""
Encodes the string into a BINARY using the provided character set.
"""
return _binary_op("encode")(self, charset)
def left(self, length: Union[int, 'Expression[int]']) -> 'Expression[str]':
"""
Returns the leftmost `length` characters from the input string.
"""
return _binary_op("left")(self, length)
def right(self, length: Union[int, 'Expression[int]']) -> 'Expression[str]':
"""
Returns the rightmost `length` characters from the input string.
"""
return _binary_op("right")(self, length)
def instr(self, s: Union[str, 'Expression[str]']) -> 'Expression[int]':
"""
Returns the position of the first occurrence of s in the input string.
"""
return _binary_op("instr")(self, s)
def locate(self, s: Union[str, 'Expression[str]'],
pos: Union[int, 'Expression[int]'] = None) -> 'Expression[int]':
"""
Returns the position of the first occurrence of s in the input string, starting the search at position pos.
"""
if pos is None:
return _binary_op("locate")(self, s)
else:
return _ternary_op("locate")(self, s, pos)
def parse_url(self, part_to_extract: Union[str, 'Expression[str]'],
key: Union[str, 'Expression[str]'] = None) -> 'Expression[str]':
"""
Parses the URL and returns the requested part of it.
Returns null if any argument is null.
"""
if key is None:
return _binary_op("parseUrl")(self, part_to_extract)
else:
return _ternary_op("parseUrl")(self, part_to_extract, key)
@property
def ltrim(self) -> 'Expression[str]':
"""
Returns a string that removes the left whitespaces from the given string.
"""
return _unary_op("ltrim")(self)
@property
def rtrim(self) -> 'Expression[str]':
"""
Returns a string that removes the right whitespaces from the given string.
"""
return _unary_op("rtrim")(self)
def repeat(self, n: Union[int, 'Expression[int]']) -> 'Expression[str]':
"""
Returns a string that repeats the base string n times.
"""
return _binary_op("repeat")(self, n)
def over(self, alias) -> 'Expression':
"""
Defines an aggregation to be used for a previously specified over window.
Example:
::
>>> tab.window(Over
>>> .partition_by(col('c'))
>>> .order_by(col('rowtime'))
>>> .preceding(row_interval(2))
>>> .following(CURRENT_ROW)
>>> .alias("w")) \\
>>> .select(col('c'), col('a'), col('a').count.over(col('w')))
"""
return _binary_op("over")(self, alias)
@property
def reverse(self) -> 'Expression[str]':
"""
Reverses the characters in the given string.
"""
return _unary_op("reverse")(self)
def split_index(self, separator: Union[str, 'Expression[str]'],
index: Union[int, 'Expression[int]']) -> 'Expression[str]':
"""
Splits the target string by the custom separator and returns the index-th (0-based) result.
"""
return _ternary_op("splitIndex")(self, separator, index)
def str_to_map(self, list_delimiter: Union[str, 'Expression[str]'] = None,
key_value_delimiter: Union[str, 'Expression[str]'] = None) -> 'Expression[dict]':
"""
Creates a map by parsing text. Split text into key-value pairs using two delimiters. The
first delimiter separates pairs, and the second delimiter separates key and value. Both
list_delimiter and key_value_delimiter are treated as regular expressions.
Default delimiters are used: ',' as list_delimiter and '=' as key_value_delimiter.
"""
if list_delimiter is None or key_value_delimiter is None:
return _unary_op("strToMap")(self)
else:
return _ternary_op("strToMap")(self, list_delimiter, key_value_delimiter)
# ---------------------------- temporal functions ----------------------------------
@property
def to_date(self) -> 'Expression':
"""
Parses a date string in the form "yyyy-MM-dd" to a SQL Date. It's equivalent to
`col.cast(DataTypes.DATE())`.
Example:
::
>>> lit("2016-06-15").to_date
"""
return _unary_op("toDate")(self)
@property
def to_time(self) -> 'Expression':
"""
Parses a time string in the form "HH:mm:ss" to a SQL Time. It's equivalent to
`col.cast(DataTypes.TIME())`.
Example:
::
>>> lit("3:30:00").to_time
"""
return _unary_op("toTime")(self)
@property
def to_timestamp(self) -> 'Expression':
"""
Parses a timestamp string in the form "yyyy-MM-dd HH:mm:ss[.SSS]" to a SQL Timestamp.
It's equivalent to `col.cast(DataTypes.TIMESTAMP(3))`.
Example:
::
>>> lit('2016-06-15 3:30:00.001').to_timestamp
"""
return _unary_op("toTimestamp")(self)
def extract(self, time_interval_unit: TimeIntervalUnit) -> 'Expression':
"""
Extracts parts of a time point or time interval. Returns the part as a long value.
e.g. `lit("2006-06-05").to_date.extract(TimeIntervalUnit.DAY)` leads to `5`.
"""
return _binary_op("extract")(
self, time_interval_unit._to_j_time_interval_unit())
def floor(self, time_interval_unit: TimeIntervalUnit = None) -> 'Expression':
"""
If time_interval_unit is specified, it rounds down a time point to the given
unit, e.g. `lit("12:44:31").to_date.floor(TimeIntervalUnit.MINUTE)` leads to
`12:44:00`. Otherwise, it calculates the largest integer less than or equal to a
given number.
"""
if time_interval_unit is None:
return _unary_op("floor")(self)
else:
return _binary_op("floor")(
self, time_interval_unit._to_j_time_interval_unit())
def ceil(self, time_interval_unit: TimeIntervalUnit = None) -> 'Expression':
"""
If time_interval_unit is specified, it rounds up a time point to the given unit,
e.g. `lit("12:44:31").to_date.floor(TimeIntervalUnit.MINUTE)` leads to 12:45:00.
Otherwise, it calculates the smallest integer greater than or equal to a given number.
"""
if time_interval_unit is None:
return _unary_op("ceil")(self)
else:
return _binary_op("ceil")(
self, time_interval_unit._to_j_time_interval_unit())
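# Editor's sketch (literals borrowed from the docstrings above; running gateway assumed):
#
#   >>> lit("2006-06-05").to_date.extract(TimeIntervalUnit.DAY)   # 5
#   >>> lit("12:44:31").to_time.floor(TimeIntervalUnit.MINUTE)    # 12:44:00
#   >>> lit("12:44:31").to_time.ceil(TimeIntervalUnit.MINUTE)     # 12:45:00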
# ---------------------------- advanced type helper functions -----------------------------
def get(self, name_or_index: Union[str, int]) -> 'Expression':
"""
Accesses the field of a Flink composite type (such as Tuple, POJO, etc.) by name or index
and returns its value.
:param name_or_index: name or index of the field (similar to Flink's field expressions)
.. seealso:: :py:attr:`~Expression.flatten`
"""
return _binary_op("get")(self, name_or_index)
@property
def flatten(self) -> 'Expression':
"""
Converts a Flink composite type (such as Tuple, POJO, etc.) and all of its direct subtypes
into a flat representation where every subtype is a separate field.
.. seealso:: :func:`~Expression.get`
"""
return _unary_op("flatten")(self)
def at(self, index) -> 'Expression':
"""
Accesses the element of an array or map based on a key or an index (starting at 1).
:param index: index key or position of the element (array index starting at 1)
.. seealso:: :py:attr:`~Expression.cardinality`, :py:attr:`~Expression.element`
"""
return _binary_op("at")(self, index)
@property
def cardinality(self) -> 'Expression':
"""
Returns the number of elements of an array or number of entries of a map.
.. seealso:: :func:`~Expression.at`, :py:attr:`~Expression.element`
"""
return _unary_op("cardinality")(self)
@property
def element(self) -> 'Expression':
"""
Returns the sole element of an array with a single element. Returns null if the array is
empty. Throws an exception if the array has more than one element.
.. seealso:: :func:`~Expression.at`, :py:attr:`~Expression.cardinality`
"""
return _unary_op("element")(self)
def array_contains(self, needle) -> 'Expression':
"""
Returns whether the given element exists in an array.
Checking for null elements in the array is supported. If the array itself is null, the
function will return null. The given element is cast implicitly to the array's element type
if necessary.
"""
return _binary_op("arrayContains")(self, needle)
def array_distinct(self) -> 'Expression':
"""
Returns an array with unique elements.
If the array itself is null, the function will return null. Keeps ordering of elements.
"""
return _binary_op("arrayDistinct")(self)
def array_position(self, needle) -> 'Expression':
"""
Returns the position of the first occurrence of element in the given array as int.
Returns 0 if the given value could not be found in the array. Returns null if either of the
arguments is null.
Note that the index is 1-based, not zero-based; the first element in the array
has index 1.
"""
return _binary_op("arrayPosition")(self, needle)
def array_remove(self, needle) -> 'Expression':
"""
Removes all elements equal to the given element from the array.
If the array itself is null, the function will return null. Keeps ordering of elements.
"""
return _binary_op("arrayRemove")(self, needle)
def array_reverse(self) -> 'Expression':
"""
Returns an array in reverse order.
If the array itself is null, the function will return null.
"""
return _binary_op("arrayReverse")(self)
def array_slice(self, start_offset, end_offset=None) -> 'Expression':
"""
Returns a subarray of the input array between 'start_offset' and 'end_offset' inclusive.
The offsets are 1-based however 0 is also treated as the beginning of the array.
Positive values are counted from the beginning of the array while negative from the end.
If 'end_offset' is omitted then this offset is treated as the length of the array.
If 'start_offset' is after 'end_offset' or both are out of array bounds an empty array will
be returned.
Returns null if any input is null.
"""
if end_offset is None:
return _binary_op("array_slice")(self, start_offset)
else:
return _ternary_op("array_slice")(self, start_offset, end_offset)
def array_union(self, array) -> 'Expression':
"""
Returns an array of the elements in the union of array1 and array2, without duplicates.
If either array is null, the function will return null.
"""
return _binary_op("arrayUnion")(self, array)
def array_concat(self, *arrays) -> 'Expression':
"""
Returns an array that is the result of concatenating at least one array.
This array contains all the elements in the first array, followed by all
the elements in the second array, and so forth, up to the Nth array.
If any input array is NULL, the function returns NULL.
"""
return _binary_op("arrayConcat")(self, *arrays)
def array_max(self) -> 'Expression':
"""
Returns the maximum value from the array.
If the array itself is null, the function returns null.
"""
return _unary_op("arrayMax")(self)
def array_join(self, delimiter, null_replacement=None) -> 'Expression':
"""
Returns a string that represents the concatenation of the elements in the given array,
whose elements must be of type string. The `delimiter` is a string that
separates each pair of consecutive elements of the array. The optional `null_replacement`
is a string that replaces null elements in the array. If `null_replacement` is not
specified, null elements in the array will be omitted from the resulting string.
Returns null if the input array, the delimiter or the null_replacement is null.
"""
if null_replacement is None:
return _binary_op("array_join")(self, delimiter)
else:
return _ternary_op("array_join")(self, delimiter, null_replacement)
@property
def map_keys(self) -> 'Expression':
"""
Returns the keys of the map as an array. No order guaranteed.
.. seealso:: :py:attr:`~Expression.map_values`, :py:attr:`~Expression.map_entries`
"""
return _unary_op("mapKeys")(self)
@property
def map_values(self) -> 'Expression':
"""
Returns the values of the map as an array. No order guaranteed.
.. seealso:: :py:attr:`~Expression.map_keys`, :py:attr:`~Expression.map_entries`
"""
return _unary_op("mapValues")(self)
@property
def map_entries(self) -> 'Expression':
"""
Returns an array of all entries in the given map. No order guaranteed.
.. seealso:: :py:attr:`~Expression.map_keys`, :py:attr:`~Expression.map_values`
"""
return _unary_op("mapEntries")(self)
# ---------------------------- time definition functions -----------------------------
@property
def rowtime(self) -> 'Expression':
"""
Declares a field as the rowtime attribute for indicating, accessing, and working in
Flink's event time.
.. seealso:: :py:attr:`~Expression.proctime`
"""
return _unary_op("rowtime")(self)
@property
def proctime(self) -> 'Expression':
"""
Declares a field as the proctime attribute for indicating, accessing, and working in
Flink's processing time.
.. seealso:: :py:attr:`~Expression.rowtime`
"""
return _unary_op("proctime")(self)
@property
def year(self) -> 'Expression':
return _unary_op("year")(self)
@property
def years(self) -> 'Expression':
return _unary_op("years")(self)
@property
def quarter(self) -> 'Expression':
return _unary_op("quarter")(self)
@property
def quarters(self) -> 'Expression':
return _unary_op("quarters")(self)
@property
def month(self) -> 'Expression':
return _unary_op("month")(self)
@property
def months(self) -> 'Expression':
return _unary_op("months")(self)
@property
def week(self) -> 'Expression':
return _unary_op("week")(self)
@property
def weeks(self) -> 'Expression':
return _unary_op("weeks")(self)
@property
def day(self) -> 'Expression':
return _unary_op("day")(self)
@property
def days(self) -> 'Expression':
return _unary_op("days")(self)
@property
def hour(self) -> 'Expression':
return _unary_op("hour")(self)
@property
def hours(self) -> 'Expression':
return _unary_op("hours")(self)
@property
def minute(self) -> 'Expression':
return _unary_op("minute")(self)
@property
def minutes(self) -> 'Expression':
return _unary_op("minutes")(self)
@property
def second(self) -> 'Expression':
return _unary_op("second")(self)
@property
def seconds(self) -> 'Expression':
return _unary_op("seconds")(self)
@property
def milli(self) -> 'Expression':
return _unary_op("milli")(self)
@property
def millis(self) -> 'Expression':
return _unary_op("millis")(self)
# ---------------------------- hash functions -----------------------------
@property
def md5(self) -> 'Expression[str]':
return _unary_op("md5")(self)
@property
def sha1(self) -> 'Expression[str]':
return _unary_op("sha1")(self)
@property
def sha224(self) -> 'Expression[str]':
return _unary_op("sha224")(self)
@property
def sha256(self) -> 'Expression[str]':
return _unary_op("sha256")(self)
@property
def sha384(self) -> 'Expression[str]':
return _unary_op("sha384")(self)
@property
def sha512(self) -> 'Expression[str]':
return _unary_op("sha512")(self)
def sha2(self, hash_length: Union[int, 'Expression[int]']) -> 'Expression[str]':
"""
Returns the hash for the given string expression using the SHA-2 family of hash
functions (SHA-224, SHA-256, SHA-384, or SHA-512).
:param hash_length: bit length of the result (either 224, 256, 384, or 512)
:return: string or null if one of the arguments is null.
.. seealso:: :py:attr:`~Expression.md5`, :py:attr:`~Expression.sha1`,
:py:attr:`~Expression.sha224`, :py:attr:`~Expression.sha256`,
:py:attr:`~Expression.sha384`, :py:attr:`~Expression.sha512`
"""
return _binary_op("sha2")(self, hash_length)
# ---------------------------- JSON functions -----------------------------
def is_json(self, json_type: JsonType = None) -> 'Expression[bool]':
"""
Determines whether a given string is valid JSON.
Specifying the optional `json_type` argument puts a constraint on which type of JSON object
is allowed. If the string is valid JSON, but not that type, `false` is returned. The default
is `JsonType.VALUE`.
Examples:
::
>>> lit('1').is_json() # True
>>> lit('[]').is_json() # True
>>> lit('{}').is_json() # True
>>> lit('"abc"').is_json() # True
>>> lit('abc').is_json() # False
>>> null_of(DataTypes.STRING()).is_json() # False
>>> lit('1').is_json(JsonType.SCALAR) # True
>>> lit('1').is_json(JsonType.ARRAY) # False
>>> lit('1').is_json(JsonType.OBJECT) # False
>>> lit('{}').is_json(JsonType.SCALAR) # False
>>> lit('{}').is_json(JsonType.ARRAY) # False
>>> lit('{}').is_json(JsonType.OBJECT) # True
"""
if json_type is None:
return _unary_op("isJson")(self)
else:
return _binary_op("isJson")(self, json_type._to_j_json_type())
def json_exists(self, path: str, on_error: JsonExistsOnError = None) -> 'Expression[bool]':
"""
Determines whether a JSON string satisfies a given search criterion.
This follows the ISO/IEC TR 19075-6 specification for JSON support in SQL.
Examples:
::
>>> lit('{"a": true}').json_exists('$.a') # True
>>> lit('{"a": true}').json_exists('$.b') # False
>>> lit('{"a": [{ "b": 1 }]}').json_exists('$.a[0].b') # True
>>> lit('{"a": true}').json_exists('strict $.b', JsonExistsOnError.TRUE) # True
>>> lit('{"a": true}').json_exists('strict $.b', JsonExistsOnError.FALSE) # False
"""
if on_error is None:
return _binary_op("jsonExists")(self, path)
else:
return _ternary_op("jsonExists")(self, path, on_error._to_j_json_exists_on_error())
def json_value(self,
path: str,
returning_type: DataType = DataTypes.STRING(),
on_empty: JsonValueOnEmptyOrError = JsonValueOnEmptyOrError.NULL,
default_on_empty: Any = None,
on_error: JsonValueOnEmptyOrError = JsonValueOnEmptyOrError.NULL,
default_on_error: Any = None) -> 'Expression':
"""
Extracts a scalar from a JSON string.
This method searches a JSON string for a given path expression and returns the value if the
value at that path is scalar. Non-scalar values cannot be returned. By default, the value is
returned as `DataTypes.STRING()`. Using `returningType` a different type can be chosen, with
the following types being supported:
* `STRING`
* `BOOLEAN`
* `INT`
* `DOUBLE`
For empty path expressions or errors a behavior can be defined to either return `null`,
raise an error or return a defined default value instead.
.. seealso:: :func:`~Expression.json_query`
Examples:
::
>>> lit('{"a": true}').json_value('$.a') # STRING: 'true'
>>> lit('{"a.b": [0.998,0.996]}').json_value("$.['a.b'][0]", \
DataTypes.DOUBLE()) # DOUBLE: 0.998
>>> lit('{"a": true}').json_value('$.a', DataTypes.BOOLEAN()) # BOOLEAN: True
>>> lit('{"a": true}').json_value('lax $.b', \
JsonValueOnEmptyOrError.DEFAULT, False) # BOOLEAN: False
>>> lit('{"a": true}').json_value('strict $.b', \
JsonValueOnEmptyOrError.NULL, None, \
JsonValueOnEmptyOrError.DEFAULT, False) # BOOLEAN: False
"""
return _varargs_op("jsonValue")(self, path, _to_java_data_type(returning_type),
on_empty._to_j_json_value_on_empty_or_error(),
default_on_empty,
on_error._to_j_json_value_on_empty_or_error(),
default_on_error)
def json_query(self, path: str, wrapping_behavior=JsonQueryWrapper.WITHOUT_ARRAY,
on_empty=JsonQueryOnEmptyOrError.NULL,
on_error=JsonQueryOnEmptyOrError.NULL) -> 'Expression':
"""
Extracts JSON values from a JSON string.
This follows the ISO/IEC TR 19075-6 specification for JSON support in SQL. The result is
always returned as a `STRING`.
The `wrapping_behavior` determines whether the extracted value should be wrapped into an
array, and whether to do so unconditionally or only if the value itself isn't an array
already.
`on_empty` and `on_error` determine the behavior in case the path expression is empty, or in
case an error was raised, respectively. By default, in both cases `null` is returned.
Other choices are to use an empty array, an empty object, or to raise an error.
.. seealso:: :func:`~Expression.json_value`
Examples:
::
>>> lit('{"a":{"b":1}}').json_query('$.a') # '{"b":1}'
>>> lit('[1,2]').json_query('$') # '[1,2]'
>>> null_of(DataTypes.STRING()).json_query('$') # None
>>> lit('{}').json_query('$', JsonQueryWrapper.CONDITIONAL_ARRAY) # '[{}]'
>>> lit('[1,2]').json_query('$', JsonQueryWrapper.CONDITIONAL_ARRAY) # '[1,2]'
>>> lit('[1,2]').json_query('$', JsonQueryWrapper.UNCONDITIONAL_ARRAY) # '[[1,2]]'
>>> lit(1).json_query('$') # null
>>> lit(1).json_query('$', JsonQueryWrapper.CONDITIONAL_ARRAY) # '[1]'
>>> lit('{}').json_query('lax $.invalid', JsonQueryWrapper.WITHOUT_ARRAY, \
JsonQueryOnEmptyOrError.EMPTY_OBJECT, \
JsonQueryOnEmptyOrError.NULL) # '{}'
>>> lit('{}').json_query('strict $.invalid', JsonQueryWrapper.WITHOUT_ARRAY, \
JsonQueryOnEmptyOrError.NULL, \
JsonQueryOnEmptyOrError.EMPTY_ARRAY) # '[]'
"""
return _varargs_op("jsonQuery")(self, path, wrapping_behavior._to_j_json_query_wrapper(),
on_empty._to_j_json_query_on_error_or_empty(),
on_error._to_j_json_query_on_error_or_empty())
# add the docs
_make_math_log_doc()
_make_math_trigonometric_doc()
_make_aggregation_doc()
_make_string_doc()
_make_temporal_doc()
_make_time_doc()
_make_hash_doc()
# add the version docs
_add_version_doc()
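# Editor's note: a minimal end-to-end sketch of this module's API (assumes an existing
# TableEnvironment `t_env`; column names are illustrative only):
#
#   >>> from pyflink.table.expressions import col, lit
#   >>> t = t_env.from_elements([(1, "a"), (2, "b")], ["id", "name"])
#   >>> t.select(col("id") + lit(10), col("name").upper_case.alias("name_upper"))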
# Source file: flink-master/flink-python/pyflink/table/sinks.py (repo: flink)
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.table.types import _to_java_data_type
from pyflink.util import java_utils
__all__ = ['TableSink', 'CsvTableSink', 'WriteMode']
class TableSink(object):
"""
A :class:`TableSink` specifies how to emit a table to an external system or location.
"""
def __init__(self, j_table_sink):
self._j_table_sink = j_table_sink
class WriteMode(object):
NO_OVERWRITE = 0
OVERWRITE = 1
class CsvTableSink(TableSink):
"""
A simple :class:`TableSink` to emit data as CSV files.
Example:
::
>>> CsvTableSink(["a", "b"], [DataTypes.INT(), DataTypes.STRING()],
... "/csv/file/path", "|", 1, WriteMode.OVERWRITE)
:param field_names: The list of field names.
:param field_types: The list of field data types.
:param path: The output path to write the Table to.
:param field_delimiter: The field delimiter.
:param num_files: The number of files to write to.
:param write_mode: The write mode to specify whether existing files are overwritten or not,
which contains: :data:`WriteMode.NO_OVERWRITE`
and :data:`WriteMode.OVERWRITE`.
"""
def __init__(self, field_names, field_types, path, field_delimiter=',', num_files=-1,
write_mode=None):
gateway = get_gateway()
if write_mode == WriteMode.NO_OVERWRITE:
j_write_mode = gateway.jvm.org.apache.flink.core.fs.FileSystem.WriteMode.NO_OVERWRITE
elif write_mode == WriteMode.OVERWRITE:
j_write_mode = gateway.jvm.org.apache.flink.core.fs.FileSystem.WriteMode.OVERWRITE
elif write_mode is None:
j_write_mode = None
else:
raise Exception('Unsupported write_mode: %s' % write_mode)
j_field_names = java_utils.to_jarray(gateway.jvm.String, field_names)
j_field_types = java_utils.to_jarray(
gateway.jvm.DataType,
[_to_java_data_type(field_type) for field_type in field_types])
j_csv_table_sink = gateway.jvm.CsvTableSink(
path, field_delimiter, num_files, j_write_mode, j_field_names, j_field_types)
super(CsvTableSink, self).__init__(j_csv_table_sink)
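# Hypothetical usage sketch (not part of the original module): constructing a CsvTableSink as in
# the docstring example above. Building the sink requires a running JVM gateway, the object is
# normally handed over to a TableEnvironment, and the output path below is made up.
def _example_build_csv_sink():
    from pyflink.table.types import DataTypes

    return CsvTableSink(
        field_names=["a", "b"],
        field_types=[DataTypes.INT(), DataTypes.STRING()],
        path="/tmp/csv-out",
        field_delimiter="|",
        num_files=1,
        write_mode=WriteMode.OVERWRITE)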
| 3,226 | 39.848101 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/table/explain_detail.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
__all__ = ['ExplainDetail']
class ExplainDetail(object):
"""
ExplainDetail defines the types of details for explain result.
.. versionadded:: 1.11.0
"""
# The cost information on physical rel node estimated by optimizer.
# e.g. TableSourceScan(..., cumulative cost = {1.0E8 rows, 1.0E8 cpu, 2.4E9 io, 0.0 network,
# 0.0 memory}
ESTIMATED_COST = 0
# The changelog mode produced by a physical rel node.
# e.g. GroupAggregate(..., changelogMode=[I,UA,D])
CHANGELOG_MODE = 1
# The execution plan in json format of the program.
JSON_EXECUTION_PLAN = 2
# The potential risk warnings and SQL optimizer tuning advice analyzed from the physical plan.
PLAN_ADVICE = 3
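# Hypothetical usage sketch (not part of the original module): the constants above are simply
# passed to Table.explain(); `table` is assumed to be a pyflink.table.Table built elsewhere.
def _example_explain(table):
    print(table.explain(ExplainDetail.ESTIMATED_COST,
                        ExplainDetail.CHANGELOG_MODE))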
| 1,684 | 38.186047 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/table.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from py4j.java_gateway import get_method
from typing import Union
from pyflink.java_gateway import get_gateway
from pyflink.table import ExplainDetail
from pyflink.table.expression import Expression, _get_java_expression
from pyflink.table.expressions import col, with_columns, without_columns
from pyflink.table.serializers import ArrowSerializer
from pyflink.table.table_descriptor import TableDescriptor
from pyflink.table.table_result import TableResult
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import create_arrow_schema
from pyflink.table.udf import UserDefinedScalarFunctionWrapper, \
UserDefinedAggregateFunctionWrapper, UserDefinedTableFunctionWrapper
from pyflink.table.utils import tz_convert_from_internal, to_expression_jarray
from pyflink.table.window import OverWindow, GroupWindow
from pyflink.util.java_utils import to_jarray
from pyflink.util.java_utils import to_j_explain_detail_arr
__all__ = ['Table', 'GroupedTable', 'GroupWindowedTable', 'OverWindowedTable', 'WindowGroupedTable']
class Table(object):
"""
A :class:`~pyflink.table.Table` object is the core abstraction of the Table API.
Similar to how the DataStream API has DataStream,
the Table API is built around :class:`~pyflink.table.Table`.
A :class:`~pyflink.table.Table` object describes a pipeline of data transformations. It does not
contain the data itself in any way. Instead, it describes how to read data from a table source,
and how to eventually write data to a table sink. The declared pipeline can be
printed, optimized, and eventually executed in a cluster. The pipeline can work with bounded or
unbounded streams which enables both streaming and batch scenarios.
By the definition above, a :class:`~pyflink.table.Table` object can actually be considered as
a view in SQL terms.
The initial :class:`~pyflink.table.Table` object is constructed by a
:class:`~pyflink.table.TableEnvironment`. For example,
:func:`~pyflink.table.TableEnvironment.from_path` obtains a table from a catalog.
Every :class:`~pyflink.table.Table` object has a schema that is available through
:func:`~pyflink.table.Table.get_schema`. A :class:`~pyflink.table.Table` object is
always associated with its original table environment during programming.
Every transformation (i.e. :func:`~pyflink.table.Table.select` or
:func:`~pyflink.table.Table.filter`) on a :class:`~pyflink.table.Table` object leads to a new
:class:`~pyflink.table.Table` object.
Use :func:`~pyflink.table.Table.execute` to execute the pipeline and retrieve the transformed
data locally during development. Otherwise, use :func:`~pyflink.table.Table.execute_insert` to
write the data into a table sink.
Many methods of this class take one or more :class:`~pyflink.table.Expression` as parameters.
For fluent definition of expressions and easier readability, we recommend adding a star import:
Example:
::
>>> from pyflink.table.expressions import *
Check the documentation for more programming language specific APIs.
The following example shows how to work with a :class:`~pyflink.table.Table` object.
Example:
::
>>> from pyflink.table import EnvironmentSettings, TableEnvironment
>>> from pyflink.table.expressions import *
>>> env_settings = EnvironmentSettings.in_streaming_mode()
>>> t_env = TableEnvironment.create(env_settings)
>>> table = t_env.from_path("my_table").select(col("colA").trim(), col("colB") + 12)
>>> table.execute().print()
"""
def __init__(self, j_table, t_env):
self._j_table = j_table
self._t_env = t_env
def __str__(self):
return self._j_table.toString()
def __getattr__(self, name) -> Expression:
"""
Returns the :class:`Expression` of the column `name`.
Example:
::
>>> tab.select(tab.a)
"""
if name not in self.get_schema().get_field_names():
raise AttributeError(
"The current table has no column named '%s', available columns: [%s]"
% (name, ', '.join(self.get_schema().get_field_names())))
return col(name)
def select(self, *fields: Expression) -> 'Table':
"""
Performs a selection operation. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions.
Example:
::
>>> from pyflink.table.expressions import col, concat
>>> tab.select(tab.key, concat(tab.value, 'hello'))
>>> tab.select(col('key'), concat(col('value'), 'hello'))
:return: The result table.
"""
return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env)
def alias(self, field: str, *fields: str) -> 'Table':
"""
Renames the fields of the expression result. Use this to disambiguate fields before
joining two tables.
Example:
::
>>> tab.alias("a", "b", "c")
:param field: Field alias.
:param fields: Additional field aliases.
:return: The result table.
"""
gateway = get_gateway()
extra_fields = to_jarray(gateway.jvm.String, fields)
return Table(get_method(self._j_table, "as")(field, extra_fields), self._t_env)
def filter(self, predicate: Expression[bool]) -> 'Table':
"""
Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
clause.
Example:
::
>>> tab.filter(col('name') == 'Fred')
:param predicate: Predicate expression string.
:return: The result table.
"""
return Table(self._j_table.filter(_get_java_expression(predicate)), self._t_env)
def where(self, predicate: Expression[bool]) -> 'Table':
"""
Filters out elements that don't pass the filter predicate. Similar to a SQL WHERE
clause.
Example:
::
>>> tab.where(col('name') == 'Fred')
:param predicate: Predicate expression string.
:return: The result table.
"""
return Table(self._j_table.where(_get_java_expression(predicate)), self._t_env)
def group_by(self, *fields: Expression) -> 'GroupedTable':
"""
Groups the elements on some grouping keys. Use this before a selection with aggregations
to perform the aggregation on a per-group basis. Similar to a SQL GROUP BY statement.
Example:
::
>>> tab.group_by(col('key')).select(col('key'), col('value').avg)
:param fields: Group keys.
:return: The grouped table.
"""
return GroupedTable(self._j_table.groupBy(to_expression_jarray(fields)), self._t_env)
def distinct(self) -> 'Table':
"""
Removes duplicate values and returns only distinct (different) values.
Example:
::
>>> tab.select(col('key'), col('value')).distinct()
:return: The result table.
"""
return Table(self._j_table.distinct(), self._t_env)
def join(self, right: 'Table', join_predicate: Expression[bool] = None):
"""
Joins two :class:`~pyflink.table.Table`. Similar to a SQL join. The fields of the two joined
operations must not overlap, use :func:`~pyflink.table.Table.alias` to rename fields if
necessary. You can use where and select clauses after a join to further specify the
behaviour of the join.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` .
Example:
::
>>> left.join(right).where((col('a') == col('b')) & (col('c') > 3))
>>> left.join(right, col('a') == col('b'))
:param right: Right table.
:param join_predicate: Optional, the join predicate expression string.
:return: The result table.
"""
if join_predicate is not None:
return Table(self._j_table.join(
right._j_table, _get_java_expression(join_predicate)), self._t_env)
else:
return Table(self._j_table.join(right._j_table), self._t_env)
def left_outer_join(self,
right: 'Table',
join_predicate: Expression[bool] = None) -> 'Table':
"""
Joins two :class:`~pyflink.table.Table`. Similar to a SQL left outer join. The fields of
the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to
rename fields if necessary.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its
:class:`~pyflink.table.TableConfig` must have null check enabled (default).
Example:
::
>>> left.left_outer_join(right)
>>> left.left_outer_join(right, col('a') == col('b'))
:param right: Right table.
:param join_predicate: Optional, the join predicate expression string.
:return: The result table.
"""
if join_predicate is None:
return Table(self._j_table.leftOuterJoin(right._j_table), self._t_env)
else:
return Table(self._j_table.leftOuterJoin(
right._j_table, _get_java_expression(join_predicate)), self._t_env)
def right_outer_join(self,
right: 'Table',
join_predicate: Expression[bool]) -> 'Table':
"""
Joins two :class:`~pyflink.table.Table`. Similar to a SQL right outer join. The fields of
the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to
rename fields if necessary.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its
:class:`~pyflink.table.TableConfig` must have null check enabled (default).
Example:
::
>>> left.right_outer_join(right, col('a') == col('b'))
:param right: Right table.
:param join_predicate: The join predicate expression string.
:return: The result table.
"""
return Table(self._j_table.rightOuterJoin(
right._j_table, _get_java_expression(join_predicate)), self._t_env)
def full_outer_join(self,
right: 'Table',
join_predicate: Expression[bool]) -> 'Table':
"""
Joins two :class:`~pyflink.table.Table`. Similar to a SQL full outer join. The fields of
the two joined operations must not overlap, use :func:`~pyflink.table.Table.alias` to
rename fields if necessary.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment` and its
:class:`~pyflink.table.TableConfig` must have null check enabled (default).
Example:
::
>>> left.full_outer_join(right, col('a') == col('b'))
:param right: Right table.
:param join_predicate: The join predicate expression string.
:return: The result table.
"""
return Table(self._j_table.fullOuterJoin(
right._j_table, _get_java_expression(join_predicate)), self._t_env)
def join_lateral(self,
table_function_call: Union[Expression, UserDefinedTableFunctionWrapper],
join_predicate: Expression[bool] = None) -> 'Table':
"""
Joins this Table with a user-defined TableFunction. This join is similar to a SQL inner
join but works with a table function. Each row of the table is joined with the rows
produced by the table function.
Example:
::
>>> from pyflink.table.expressions import *
>>> t_env.create_java_temporary_system_function("split",
... "java.table.function.class.name")
>>> tab.join_lateral(call('split', ' ').alias('b'), col('a') == col('b'))
>>> # take all the columns as inputs
>>> @udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
... def split_row(row: Row):
... for s in row[1].split(","):
... yield row[0], s
>>> tab.join_lateral(split_row.alias("a", "b"))
:param table_function_call: An expression representing a table function call.
:param join_predicate: Optional, the join predicate expression; if it is not specified,
                       the join is performed on the literal TRUE.
:return: The result Table.
"""
if isinstance(table_function_call, UserDefinedTableFunctionWrapper):
table_function_call._set_takes_row_as_input()
if hasattr(table_function_call, "_alias_names"):
alias_names = getattr(table_function_call, "_alias_names")
table_function_call = table_function_call(with_columns(col("*"))) \
.alias(*alias_names)
else:
raise AttributeError('table_function_call must be followed by an alias function, '
                     'e.g. table_function.alias("a", "b")')
if join_predicate is None:
return Table(self._j_table.joinLateral(
_get_java_expression(table_function_call)), self._t_env)
else:
return Table(self._j_table.joinLateral(
_get_java_expression(table_function_call),
_get_java_expression(join_predicate)),
self._t_env)
def left_outer_join_lateral(self,
table_function_call: Union[Expression,
UserDefinedTableFunctionWrapper],
join_predicate: Expression[bool] = None) -> 'Table':
"""
Joins this Table with a user-defined TableFunction. This join is similar to
a SQL left outer join but works with a table function. Each row of the table is joined
with all rows produced by the table function. If the join does not produce any row, the
outer row is padded with nulls.
Example:
::
>>> t_env.create_java_temporary_system_function("split",
... "java.table.function.class.name")
>>> from pyflink.table.expressions import *
>>> tab.left_outer_join_lateral(call('split', ' ').alias('b'))
>>> # take all the columns as inputs
>>> @udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
... def split_row(row: Row):
... for s in row[1].split(","):
... yield row[0], s
>>> tab.left_outer_join_lateral(split_row.alias("a", "b"))
:param table_function_call: An expression representing a table function call.
:param join_predicate: Optional, the join predicate expression; if it is not specified,
                       the join is performed on the literal TRUE.
:return: The result Table.
"""
if isinstance(table_function_call, UserDefinedTableFunctionWrapper):
table_function_call._set_takes_row_as_input()
if hasattr(table_function_call, "_alias_names"):
alias_names = getattr(table_function_call, "_alias_names")
table_function_call = table_function_call(with_columns(col("*"))) \
.alias(*alias_names)
else:
raise AttributeError('table_function_call must be followed by an alias function, '
                     'e.g. table_function.alias("a", "b")')
if join_predicate is None:
return Table(self._j_table.leftOuterJoinLateral(
_get_java_expression(table_function_call)), self._t_env)
else:
return Table(self._j_table.leftOuterJoinLateral(
_get_java_expression(table_function_call),
_get_java_expression(join_predicate)),
self._t_env)
def minus(self, right: 'Table') -> 'Table':
"""
Minus of two :class:`~pyflink.table.Table` with duplicate records removed.
Similar to a SQL EXCEPT clause. Minus returns records from the left table that do not
exist in the right table. Duplicate records in the left table are returned
exactly once, i.e., duplicates are removed. Both tables must have identical field types.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.minus(right)
:param right: Right table.
:return: The result table.
"""
return Table(self._j_table.minus(right._j_table), self._t_env)
def minus_all(self, right: 'Table') -> 'Table':
"""
Minus of two :class:`~pyflink.table.Table`. Similar to a SQL EXCEPT ALL clause.
MinusAll returns the records that do not exist in
the right table. A record that is present n times in the left table and m times
in the right table is returned (n - m) times, i.e., as many duplicates as are present
in the right table are removed. Both tables must have identical field types.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.minus_all(right)
:param right: Right table.
:return: The result table.
"""
return Table(self._j_table.minusAll(right._j_table), self._t_env)
def union(self, right: 'Table') -> 'Table':
"""
Unions two :class:`~pyflink.table.Table` with duplicate records removed.
Similar to a SQL UNION. The fields of the two union operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.union(right)
:param right: Right table.
:return: The result table.
"""
return Table(self._j_table.union(right._j_table), self._t_env)
def union_all(self, right: 'Table') -> 'Table':
"""
Unions two :class:`~pyflink.table.Table`. Similar to a SQL UNION ALL. The fields of the
two union operations must fully overlap.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.union_all(right)
:param right: Right table.
:return: The result table.
"""
return Table(self._j_table.unionAll(right._j_table), self._t_env)
def intersect(self, right: 'Table') -> 'Table':
"""
Intersects two :class:`~pyflink.table.Table` with duplicate records removed. Intersect
returns records that exist in both tables. If a record is present in one or both tables
more than once, it is returned just once, i.e., the resulting table has no duplicate
records. Similar to a SQL INTERSECT. The fields of the two intersect operations must fully
overlap.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.intersect(right)
:param right: Right table.
:return: The result table.
"""
return Table(self._j_table.intersect(right._j_table), self._t_env)
def intersect_all(self, right: 'Table') -> 'Table':
"""
Intersects two :class:`~pyflink.table.Table`. IntersectAll returns records that exist in
both tables. If a record is present in both tables more than once, it is returned as many
times as it is present in both tables, i.e., the resulting table might have duplicate
records. Similar to an SQL INTERSECT ALL. The fields of the two intersect operations must
fully overlap.
.. note::
Both tables must be bound to the same :class:`~pyflink.table.TableEnvironment`.
Example:
::
>>> left.intersect_all(right)
:param right: Right table.
:return: The result table.
"""
return Table(self._j_table.intersectAll(right._j_table), self._t_env)
def order_by(self, *fields: Expression) -> 'Table':
"""
Sorts the given :class:`~pyflink.table.Table`. Similar to SQL ORDER BY.
The resulting Table is globally sorted across all parallel partitions.
Example:
::
>>> tab.order_by(col('name').desc)
For unbounded tables, this operation requires a sorting on a time attribute or a subsequent
fetch operation.
:param fields: Order fields expression string.
:return: The result table.
"""
return Table(self._j_table.orderBy(to_expression_jarray(fields)), self._t_env)
def offset(self, offset: int) -> 'Table':
"""
Limits a (possibly sorted) result from an offset position.
This method can be combined with a preceding :func:`~pyflink.table.Table.order_by` call for
a deterministic order and a subsequent :func:`~pyflink.table.Table.fetch` call to return n
rows after skipping the first o rows.
Example:
::
# skips the first 3 rows and returns all following rows.
>>> tab.order_by(col('name').desc).offset(3)
# skips the first 10 rows and returns the next 5 rows.
>>> tab.order_by(col('name').desc).offset(10).fetch(5)
For unbounded tables, this operation requires a subsequent fetch operation.
:param offset: Number of records to skip.
:return: The result table.
"""
return Table(self._j_table.offset(offset), self._t_env)
def fetch(self, fetch: int) -> 'Table':
"""
Limits a (possibly sorted) result to the first n rows.
This method can be combined with a preceding :func:`~pyflink.table.Table.order_by` call for
a deterministic order and :func:`~pyflink.table.Table.offset` call to return n rows after
skipping the first o rows.
Example:
Returns the first 3 records.
::
>>> tab.order_by(col('name').desc).fetch(3)
Skips the first 10 rows and returns the next 5 rows.
::
>>> tab.order_by(col('name').desc).offset(10).fetch(5)
:param fetch: The number of records to return. Fetch must be >= 0.
:return: The result table.
"""
return Table(self._j_table.fetch(fetch), self._t_env)
def limit(self, fetch: int, offset: int = 0) -> 'Table':
"""
Limits a (possibly sorted) result to the first n rows.
This method is a synonym for :func:`~pyflink.table.Table.offset` followed by
:func:`~pyflink.table.Table.fetch`.
Example:
Returns the first 3 records.
::
>>> tab.limit(3)
Skips the first 10 rows and returns the next 5 rows.
::
>>> tab.limit(5, 10)
:param fetch: the first number of rows to fetch.
:param offset: the number of records to skip, default 0.
:return: The result table.
"""
return self.offset(offset).fetch(fetch)
def window(self, window: GroupWindow) -> 'GroupWindowedTable':
"""
Defines group window on the records of a table.
A group window groups the records of a table by assigning them to windows defined by a time
or row interval.
For streaming tables of infinite size, grouping into windows is required to define finite
groups on which group-based aggregates can be computed.
For batch tables of finite size, windowing essentially provides shortcuts for time-based
groupBy.
.. note::
Computing windowed aggregates on a streaming table is only a parallel operation
if additional grouping attributes are added to the
:func:`~pyflink.table.GroupWindowedTable.group_by` clause.
If the :func:`~pyflink.table.GroupWindowedTable.group_by` only references a GroupWindow
alias, the streamed table will be processed by a single task, i.e., with parallelism 1.
Example:
::
>>> from pyflink.table.expressions import col, lit
>>> tab.window(Tumble.over(lit(10).minutes).on(col('rowtime')).alias('w')) \\
... .group_by(col('w')) \\
... .select(col('a').sum.alias('a'),
... col('w').start.alias('b'),
... col('w').end.alias('c'),
... col('w').rowtime.alias('d'))
:param window: A :class:`~pyflink.table.window.GroupWindow` created from
:class:`~pyflink.table.window.Tumble`, :class:`~pyflink.table.window.Session`
or :class:`~pyflink.table.window.Slide`.
:return: A group windowed table.
"""
return GroupWindowedTable(self._j_table.window(window._java_window), self._t_env)
def over_window(self, *over_windows: OverWindow) -> 'OverWindowedTable':
"""
Defines over-windows on the records of a table.
An over-window defines for each record an interval of records over which aggregation
functions can be computed.
Example:
::
>>> from pyflink.table.expressions import col, lit
>>> tab.over_window(Over.partition_by(col('c')).order_by(col('rowtime')) \\
... .preceding(lit(10).seconds).alias("ow")) \\
... .select(col('c'), col('b').count.over(col('ow'), col('e').sum.over(col('ow'))))
.. note::
Computing over window aggregates on a streaming table is only a parallel
operation if the window is partitioned. Otherwise, the whole stream will be processed
by a single task, i.e., with parallelism 1.
.. note::
Over-windows for batch tables are currently not supported.
:param over_windows: over windows created from :class:`~pyflink.table.window.Over`.
:return: A over windowed table.
"""
gateway = get_gateway()
window_array = to_jarray(gateway.jvm.OverWindow,
[item._java_over_window for item in over_windows])
return OverWindowedTable(self._j_table.window(window_array), self._t_env)
def add_columns(self, *fields: Expression) -> 'Table':
"""
Adds additional columns. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions, but can not contain aggregations. It will throw an
exception if the added fields already exist.
Example:
::
>>> from pyflink.table.expressions import col, concat
>>> tab.add_columns((col('a') + 1).alias('a1'), concat(col('b'), 'sunny').alias('b1'))
:param fields: Column list string.
:return: The result table.
"""
return Table(self._j_table.addColumns(to_expression_jarray(fields)), self._t_env)
def add_or_replace_columns(self, *fields: Expression) -> 'Table':
"""
Adds additional columns. Similar to a SQL SELECT statement. The field expressions
can contain complex expressions, but can not contain aggregations. Existing fields will be
replaced if an added column has the same name as an existing column. Moreover, if the added
fields have duplicate field names, the last one is used.
Example:
::
>>> from pyflink.table.expressions import col, concat
>>> tab.add_or_replace_columns((col('a') + 1).alias('a1'),
... concat(col('b'), 'sunny').alias('b1'))
:param fields: Column list string.
:return: The result table.
"""
return Table(self._j_table.addOrReplaceColumns(to_expression_jarray(fields)),
self._t_env)
def rename_columns(self, *fields: Expression) -> 'Table':
"""
Renames existing columns. Similar to a field alias statement. The field expressions
should be alias expressions, and only the existing fields can be renamed.
Example:
::
>>> tab.rename_columns(col('a').alias('a1'), col('b').alias('b1'))
:param fields: Column list string.
:return: The result table.
"""
return Table(self._j_table.renameColumns(to_expression_jarray(fields)),
self._t_env)
def drop_columns(self, *fields: Expression) -> 'Table':
"""
Drops existing columns. The field expressions should be field reference expressions.
Example:
::
>>> tab.drop_columns(col('a'), col('b'))
:param fields: Column list string.
:return: The result table.
"""
return Table(self._j_table.dropColumns(to_expression_jarray(fields)),
self._t_env)
def map(self, func: Union[Expression, UserDefinedScalarFunctionWrapper]) -> 'Table':
"""
Performs a map operation with a user-defined scalar function.
Example:
::
>>> add = udf(lambda x: Row(x + 1, x * x), result_type=DataTypes.Row(
... [DataTypes.FIELD("a", DataTypes.INT()), DataTypes.FIELD("b", DataTypes.INT())]))
>>> tab.map(add(col('a'))).alias("a", "b")
>>> # take all the columns as inputs
>>> identity = udf(lambda row: row, result_type=DataTypes.Row(
... [DataTypes.FIELD("a", DataTypes.INT()), DataTypes.FIELD("b", DataTypes.INT())]))
>>> tab.map(identity)
:param func: user-defined scalar function.
:return: The result table.
.. versionadded:: 1.13.0
"""
if isinstance(func, Expression):
return Table(self._j_table.map(func._j_expr), self._t_env)
else:
func._set_takes_row_as_input()
return Table(self._j_table.map(func(with_columns(col("*")))._j_expr), self._t_env)
def flat_map(self, func: Union[Expression, UserDefinedTableFunctionWrapper]) -> 'Table':
"""
Performs a flatMap operation with a user-defined table function.
Example:
::
>>> @udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
... def split(x, string):
... for s in string.split(","):
... yield x, s
>>> tab.flat_map(split(col('a'), col('b')))
>>> # take all the columns as inputs
>>> @udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
... def split_row(row: Row):
... for s in row[1].split(","):
... yield row[0], s
>>> tab.flat_map(split_row)
:param func: user-defined table function.
:return: The result table.
.. versionadded:: 1.13.0
"""
if isinstance(func, Expression):
return Table(self._j_table.flatMap(func._j_expr), self._t_env)
else:
func._set_takes_row_as_input()
return Table(self._j_table.flatMap(func(with_columns(col("*")))._j_expr), self._t_env)
def aggregate(self, func: Union[Expression, UserDefinedAggregateFunctionWrapper]) \
-> 'AggregatedTable':
"""
Performs a global aggregate operation with an aggregate function. You have to close the
aggregate with a select statement.
Example:
::
>>> agg = udaf(lambda a: (a.mean(), a.max()),
... result_type=DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.FLOAT()),
... DataTypes.FIELD("b", DataTypes.INT())]),
... func_type="pandas")
>>> tab.aggregate(agg(col('a')).alias("a", "b")).select(col('a'), col('b'))
>>> # take all the columns as inputs
>>> # pd is a Pandas.DataFrame
>>> agg_row = udaf(lambda pd: (pd.a.mean(), pd.a.max()),
... result_type=DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.FLOAT()),
... DataTypes.FIELD("b", DataTypes.INT())]),
... func_type="pandas")
>>> tab.aggregate(agg_row.alias("a", "b")).select(col('a'), col('b'))
:param func: user-defined aggregate function.
:return: The result table.
.. versionadded:: 1.13.0
"""
if isinstance(func, Expression):
return AggregatedTable(self._j_table.aggregate(func._j_expr), self._t_env)
else:
func._set_takes_row_as_input()
if hasattr(func, "_alias_names"):
alias_names = getattr(func, "_alias_names")
func = func(with_columns(col("*"))).alias(*alias_names)
else:
func = func(with_columns(col("*")))
return AggregatedTable(self._j_table.aggregate(func._j_expr), self._t_env)
def flat_aggregate(self, func: Union[Expression, UserDefinedAggregateFunctionWrapper]) \
-> 'FlatAggregateTable':
"""
Performs a global flat_aggregate without group_by. flat_aggregate takes a
:class:`~pyflink.table.TableAggregateFunction` which returns multiple rows. Use a selection
after the flat_aggregate.
Example:
::
>>> table_agg = udtaf(MyTableAggregateFunction())
>>> tab.flat_aggregate(table_agg(col('a')).alias("a", "b")).select(col('a'), col('b'))
>>> # take all the columns as inputs
>>> class Top2(TableAggregateFunction):
... def emit_value(self, accumulator):
... yield Row(accumulator[0])
... yield Row(accumulator[1])
...
... def create_accumulator(self):
... return [None, None]
...
... def accumulate(self, accumulator, *args):
... args[0] # type: Row
... if args[0][0] is not None:
... if accumulator[0] is None or args[0][0] > accumulator[0]:
... accumulator[1] = accumulator[0]
... accumulator[0] = args[0][0]
... elif accumulator[1] is None or args[0][0] > accumulator[1]:
... accumulator[1] = args[0][0]
...
... def get_accumulator_type(self):
... return DataTypes.ARRAY(DataTypes.BIGINT())
...
... def get_result_type(self):
... return DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.BIGINT())])
>>> top2 = udtaf(Top2())
>>> tab.flat_aggregate(top2.alias("a", "b")).select(col('a'), col('b'))
:param func: user-defined table aggregate function.
:return: The result table.
.. versionadded:: 1.13.0
"""
if isinstance(func, Expression):
return FlatAggregateTable(self._j_table.flatAggregate(func._j_expr), self._t_env)
else:
func._set_takes_row_as_input()
if hasattr(func, "_alias_names"):
alias_names = getattr(func, "_alias_names")
func = func(with_columns(col("*"))).alias(*alias_names)
else:
func = func(with_columns(col("*")))
return FlatAggregateTable(self._j_table.flatAggregate(func._j_expr), self._t_env)
def to_pandas(self):
"""
Converts the table to a pandas DataFrame. It will collect the content of the table to
the client side, so please make sure that the content of the table can fit in memory
before calling this method.
Example:
::
>>> pdf = pd.DataFrame(np.random.rand(1000, 2))
>>> table = table_env.from_pandas(pdf, ["a", "b"])
>>> table.filter(col('a') > 0.5).to_pandas()
:return: the result pandas DataFrame.
.. versionadded:: 1.11.0
"""
self._t_env._before_execute()
gateway = get_gateway()
max_arrow_batch_size = self._j_table.getTableEnvironment().getConfig()\
.get(gateway.jvm.org.apache.flink.python.PythonOptions.MAX_ARROW_BATCH_SIZE)
batches_iterator = gateway.jvm.org.apache.flink.table.runtime.arrow.ArrowUtils\
.collectAsPandasDataFrame(self._j_table, max_arrow_batch_size)
if batches_iterator.hasNext():
import pytz
timezone = pytz.timezone(
self._j_table.getTableEnvironment().getConfig().getLocalTimeZone().getId())
serializer = ArrowSerializer(
create_arrow_schema(self.get_schema().get_field_names(),
self.get_schema().get_field_data_types()),
self.get_schema().to_row_data_type(),
timezone)
import pyarrow as pa
table = pa.Table.from_batches(serializer.load_from_iterator(batches_iterator))
pdf = table.to_pandas()
schema = self.get_schema()
for field_name in schema.get_field_names():
pdf[field_name] = tz_convert_from_internal(
pdf[field_name], schema.get_field_data_type(field_name), timezone)
return pdf
else:
import pandas as pd
return pd.DataFrame.from_records([], columns=self.get_schema().get_field_names())
def get_schema(self) -> TableSchema:
"""
Returns the :class:`~pyflink.table.TableSchema` of this table.
:return: The schema of this table.
"""
return TableSchema(j_table_schema=self._j_table.getSchema())
def print_schema(self):
"""
Prints the schema of this table to the console in a tree format.
"""
self._j_table.printSchema()
def execute_insert(self,
table_path_or_descriptor: Union[str, TableDescriptor],
overwrite: bool = False) -> TableResult:
"""
1. When table_path_or_descriptor is a table path:
Writes the :class:`~pyflink.table.Table` to a :class:`~pyflink.table.TableSink` that was
registered under the specified name, and then executes the insert operation. For the path
resolution algorithm see :func:`~TableEnvironment.use_database`.
Example:
::
>>> tab.execute_insert("sink")
2. When table_path_or_descriptor is a table descriptor:
Declares that the pipeline defined by the given Table object should be written to a
table (backed by a DynamicTableSink) expressed via the given TableDescriptor. It
executes the insert operation.
TableDescriptor is registered as an inline (i.e. anonymous) temporary catalog table
(see :func:`~TableEnvironment.create_temporary_table`) using a unique identifier.
Note that calling this method multiple times, even with the same descriptor, results
in multiple sink tables being registered.
This method allows to declare a :class:`~pyflink.table.Schema` for the sink descriptor.
The declaration is similar to a {@code CREATE TABLE} DDL in SQL and allows to:
1. overwrite automatically derived columns with a custom DataType
2. add metadata columns next to the physical columns
3. declare a primary key
It is possible to declare a schema without physical/regular columns. In this case, those
columns will be automatically derived and implicitly put at the beginning of the schema
declaration.
Examples:
::
>>> schema = Schema.new_builder()
... .column("f0", DataTypes.STRING())
... .build()
>>> table = table_env.from_descriptor(TableDescriptor.for_connector("datagen")
... .schema(schema)
... .build())
>>> table.execute_insert(TableDescriptor.for_connector("blackhole")
... .schema(schema)
... .build())
If multiple pipelines should insert data into one or more sink tables as part of a
single execution, use a :class:`~pyflink.table.StatementSet`
(see :func:`~TableEnvironment.create_statement_set`).
By default, all insertion operations are executed asynchronously. Use
:func:`~TableResult.await` or :func:`~TableResult.get_job_client` to monitor the
execution.
.. note:: execute_insert for a table descriptor (case 2.) was added in
flink 1.14.0.
:param table_path_or_descriptor: The path of the registered
:class:`~pyflink.table.TableSink` to which the :class:`~pyflink.table.Table` is written,
or the descriptor describing the sink table into which the data should be inserted.
:param overwrite: Indicates whether the insert should overwrite existing data or not.
:return: The table result.
.. versionadded:: 1.11.0
"""
self._t_env._before_execute()
if isinstance(table_path_or_descriptor, str):
return TableResult(self._j_table.executeInsert(table_path_or_descriptor, overwrite))
else:
return TableResult(self._j_table.executeInsert(
table_path_or_descriptor._j_table_descriptor, overwrite))
def execute(self) -> TableResult:
"""
Collects the contents of the current table to the local client.
Example:
::
>>> tab.execute()
:return: The content of the table.
.. versionadded:: 1.11.0
"""
self._t_env._before_execute()
return TableResult(self._j_table.execute())
def explain(self, *extra_details: ExplainDetail) -> str:
"""
Returns the AST of this table and the execution plan.
:param extra_details: The extra explain details which the explain result should include,
e.g. estimated cost, changelog mode for streaming
:return: The AST and the execution plan of this table as a string.
.. versionadded:: 1.11.0
"""
TEXT = get_gateway().jvm.org.apache.flink.table.api.ExplainFormat.TEXT
j_extra_details = to_j_explain_detail_arr(extra_details)
return self._j_table.explain(TEXT, j_extra_details)
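# Hypothetical end-to-end sketch (not part of the original module) tying several of the Table
# methods above together; the element data and column names are made up for illustration.
def _example_table_pipeline():
    from pyflink.table import EnvironmentSettings, TableEnvironment
    from pyflink.table.expressions import col

    t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
    orders = t_env.from_elements(
        [(1, 'Alice', 10.0), (2, 'Bob', 4.5), (3, 'Alice', 3.5)],
        ['id', 'name', 'amount'])
    result = orders \
        .filter(col('amount') > 4.0) \
        .group_by(col('name')) \
        .select(col('name'), col('amount').sum.alias('total'))
    # print the optimized plan including the changelog mode, then collect the result locally
    print(result.explain(ExplainDetail.CHANGELOG_MODE))
    result.execute().print()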
class GroupedTable(object):
"""
A table that has been grouped on a set of grouping keys.
"""
def __init__(self, java_table, t_env):
self._j_table = java_table
self._t_env = t_env
def select(self, *fields: Expression) -> 'Table':
"""
Performs a selection operation on a grouped table. Similar to an SQL SELECT statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> tab.group_by(col('key')).select(col('key'), col('value').avg.alias('average'))
:param fields: Expression string that contains group keys and aggregate function calls.
:return: The result table.
"""
return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env)
def aggregate(self, func: Union[Expression, UserDefinedAggregateFunctionWrapper]) \
-> 'AggregatedTable':
"""
Performs an aggregate operation with an aggregate function. You have to close the
aggregate with a select statement.
Example:
::
>>> agg = udaf(lambda a: (a.mean(), a.max()),
... result_type=DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.FLOAT()),
... DataTypes.FIELD("b", DataTypes.INT())]),
... func_type="pandas")
>>> tab.group_by(col('a')).aggregate(agg(col('b')).alias("c", "d")).select(
... col('a'), col('c'), col('d'))
>>> # take all the columns as inputs
>>> # pd is a Pandas.DataFrame
>>> agg_row = udaf(lambda pd: (pd.a.mean(), pd.b.max()),
... result_type=DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.FLOAT()),
... DataTypes.FIELD("b", DataTypes.INT())]),
... func_type="pandas")
>>> tab.group_by(col('a')).aggregate(agg_row.alias("a", "b")).select(col('a'), col('b'))
:param func: user-defined aggregate function.
:return: The result table.
.. versionadded:: 1.13.0
"""
if isinstance(func, Expression):
return AggregatedTable(self._j_table.aggregate(func._j_expr), self._t_env)
else:
func._set_takes_row_as_input()
if hasattr(func, "_alias_names"):
alias_names = getattr(func, "_alias_names")
func = func(with_columns(col("*"))).alias(*alias_names)
else:
func = func(with_columns(col("*")))
return AggregatedTable(self._j_table.aggregate(func._j_expr), self._t_env)
def flat_aggregate(self, func: Union[Expression, UserDefinedAggregateFunctionWrapper]) \
-> 'FlatAggregateTable':
"""
Performs a flat_aggregate operation on a grouped table. flat_aggregate takes a
:class:`~pyflink.table.TableAggregateFunction` which returns multiple rows. Use a selection
after flatAggregate.
Example:
::
>>> table_agg = udtaf(MyTableAggregateFunction())
>>> tab.group_by(col('c')).flat_aggregate(table_agg(col('a')).alias("a")).select(
... col('c'), col('a'))
>>> # take all the columns as inputs
>>> class Top2(TableAggregateFunction):
... def emit_value(self, accumulator):
... yield Row(accumulator[0])
... yield Row(accumulator[1])
...
... def create_accumulator(self):
... return [None, None]
...
... def accumulate(self, accumulator, *args):
... args[0] # type: Row
... if args[0][0] is not None:
... if accumulator[0] is None or args[0][0] > accumulator[0]:
... accumulator[1] = accumulator[0]
... accumulator[0] = args[0][0]
... elif accumulator[1] is None or args[0][0] > accumulator[1]:
... accumulator[1] = args[0][0]
...
... def get_accumulator_type(self):
... return DataTypes.ARRAY(DataTypes.BIGINT())
...
... def get_result_type(self):
... return DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.BIGINT())])
>>> top2 = udtaf(Top2())
>>> tab.group_by(col('c')) \\
... .flat_aggregate(top2.alias("a", "b")) \\
... .select(col('a'), col('b'))
:param func: user-defined table aggregate function.
:return: The result table.
.. versionadded:: 1.13.0
"""
if isinstance(func, Expression):
return FlatAggregateTable(self._j_table.flatAggregate(func._j_expr), self._t_env)
else:
func._set_takes_row_as_input()
if hasattr(func, "_alias_names"):
alias_names = getattr(func, "_alias_names")
func = func(with_columns(col("*"))).alias(*alias_names)
else:
func = func(with_columns(col("*")))
return FlatAggregateTable(self._j_table.flatAggregate(func._j_expr), self._t_env)
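# Hypothetical sketch (not part of the original module): a grouped aggregation can also be
# expressed with a vectorized (pandas) UDAF directly in the select clause; `tab` is assumed to
# have an integer column `a` and a float column `b`.
def _example_grouped_pandas_agg(tab):
    from pyflink.table import DataTypes
    from pyflink.table.expressions import col
    from pyflink.table.udf import udaf

    mean_b = udaf(lambda b: b.mean(), result_type=DataTypes.FLOAT(), func_type="pandas")
    return tab.group_by(col('a')).select(col('a'), mean_b(col('b')).alias('mean_b'))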
class GroupWindowedTable(object):
"""
A table that has been windowed for :class:`~pyflink.table.GroupWindow`.
"""
def __init__(self, java_group_windowed_table, t_env):
self._j_table = java_group_windowed_table
self._t_env = t_env
def group_by(self, *fields: Expression) -> 'WindowGroupedTable':
"""
Groups the elements by a mandatory window and one or more optional grouping attributes.
The window is specified by referring to its alias.
If no additional grouping attribute is specified and if the input is a streaming table,
the aggregation will be performed by a single task, i.e., with parallelism 1.
Aggregations are performed per group and defined by a subsequent
:func:`~pyflink.table.WindowGroupedTable.select` clause similar to SQL SELECT-GROUP-BY
query.
Example:
::
>>> from pyflink.table.expressions import col, lit
>>> tab.window(Tumble.over(lit(10).minutes).on(col('rowtime')).alias('w')) \\
... .group_by(col('w')) \\
... .select(col('a').sum.alias('a'),
... col('w').start.alias('b'),
... col('w').end.alias('c'),
... col('w').rowtime.alias('d'))
:param fields: Group keys.
:return: A window grouped table.
"""
return WindowGroupedTable(
self._j_table.groupBy(to_expression_jarray(fields)), self._t_env)
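# Hypothetical sketch (not part of the original module): a tumbling window grouped by both the
# window alias and a key; `tab` is assumed to expose a rowtime attribute `rowtime` and a
# column `a`.
def _example_tumbling_window(tab):
    from pyflink.table.expressions import col, lit
    from pyflink.table.window import Tumble

    return tab.window(Tumble.over(lit(10).minutes).on(col('rowtime')).alias('w')) \
        .group_by(col('w'), col('a')) \
        .select(col('a'),
                col('w').start.alias('window_start'),
                col('w').end.alias('window_end'),
                col('a').count.alias('cnt'))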
class WindowGroupedTable(object):
"""
A table that has been windowed and grouped for :class:`~pyflink.table.window.GroupWindow`.
"""
def __init__(self, java_window_grouped_table, t_env):
self._j_table = java_window_grouped_table
self._t_env = t_env
def select(self, *fields: Expression) -> 'Table':
"""
Performs a selection operation on a window grouped table. Similar to an SQL SELECT
statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> window_grouped_table.select(col('key'),
... col('window').start,
... col('value').avg.alias('valavg'))
:param fields: Expression string.
:return: The result table.
"""
if all(isinstance(f, Expression) for f in fields):
return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env)
else:
assert len(fields) == 1
assert isinstance(fields[0], str)
return Table(self._j_table.select(fields[0]), self._t_env)
def aggregate(self, func: Union[Expression, UserDefinedAggregateFunctionWrapper]) \
-> 'AggregatedTable':
"""
Performs an aggregate operation on a window grouped table. You have to close the
aggregate with a select statement.
Example:
::
>>> agg = udaf(lambda a: (a.mean(), a.max()),
... result_type=DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.FLOAT()),
... DataTypes.FIELD("b", DataTypes.INT())]),
... func_type="pandas")
>>> window_grouped_table.group_by(col("w")) \
... .aggregate(agg(col('b'))) \
... .alias("c", "d") \
... .select(col('c'), col('d'))
>>> # take all the columns as inputs
>>> # pd is a Pandas.DataFrame
>>> agg_row = udaf(lambda pd: (pd.a.mean(), pd.b.max()),
... result_type=DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.FLOAT()),
... DataTypes.FIELD("b", DataTypes.INT())]),
... func_type="pandas")
>>> window_grouped_table.group_by(col("w"), col("a")).aggregate(agg_row)
:param func: user-defined aggregate function.
:return: The result table.
.. versionadded:: 1.13.0
"""
if isinstance(func, Expression):
return AggregatedTable(self._j_table.aggregate(func._j_expr), self._t_env)
else:
func._set_takes_row_as_input()
func = self._to_expr(func)
return AggregatedTable(self._j_table.aggregate(func._j_expr), self._t_env)
def _to_expr(self, func: UserDefinedAggregateFunctionWrapper) -> Expression:
group_window_field = self._j_table.getClass().getDeclaredField("window")
group_window_field.setAccessible(True)
j_group_window = group_window_field.get(self._j_table)
j_time_field = j_group_window.getTimeField()
fields_without_window = without_columns(j_time_field)
if hasattr(func, "_alias_names"):
alias_names = getattr(func, "_alias_names")
func_expression = func(fields_without_window).alias(*alias_names)
else:
func_expression = func(fields_without_window)
return func_expression
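# Hypothetical sketch (not part of the original module): the same pattern with a sliding window;
# `tab` is assumed to expose a rowtime attribute `rowtime` and a numeric column `b`.
def _example_sliding_window(tab):
    from pyflink.table.expressions import col, lit
    from pyflink.table.window import Slide

    return tab.window(Slide.over(lit(1).hours)
                           .every(lit(15).minutes)
                           .on(col('rowtime')).alias('w')) \
        .group_by(col('w')) \
        .select(col('w').start.alias('window_start'), col('b').sum.alias('total'))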
class OverWindowedTable(object):
"""
A table that has been windowed for :class:`~pyflink.table.window.OverWindow`.
Unlike group windows, which are specified in the GROUP BY clause, over windows do not collapse
rows. Instead over window aggregates compute an aggregate for each input row over a range of
its neighboring rows.
"""
def __init__(self, java_over_windowed_table, t_env):
self._j_table = java_over_windowed_table
self._t_env = t_env
def select(self, *fields: Expression) -> 'Table':
"""
Performs a selection operation on an over windowed table. Similar to an SQL SELECT
statement.
The field expressions can contain complex expressions and aggregations.
Example:
::
>>> over_windowed_table.select(col('c'),
... col('b').count.over(col('ow')),
... col('e').sum.over(col('ow')))
:param fields: Expression string.
:return: The result table.
"""
return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env)
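# Hypothetical sketch (not part of the original module): an over window computes aggregates per
# input row instead of collapsing rows; `tab` is assumed to have columns `c`, `b`, `e` and a
# rowtime attribute `rowtime`.
def _example_over_window(tab):
    from pyflink.table.expressions import col, lit
    from pyflink.table.window import Over

    return tab.over_window(Over.partition_by(col('c'))
                              .order_by(col('rowtime'))
                              .preceding(lit(10).seconds)
                              .alias('ow')) \
        .select(col('c'),
                col('b').count.over(col('ow')).alias('cnt'),
                col('e').sum.over(col('ow')).alias('total'))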
class AggregatedTable(object):
"""
A table on which an aggregate function has been applied.
"""
def __init__(self, java_table, t_env):
self._j_table = java_table
self._t_env = t_env
def select(self, *fields: Expression) -> 'Table':
"""
Performs a selection operation after an aggregate operation. The field expressions
cannot contain table functions and aggregations.
Example:
""
>>> agg = udaf(lambda a: (a.mean(), a.max()),
... result_type=DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.FLOAT()),
... DataTypes.FIELD("b", DataTypes.INT())]),
... func_type="pandas")
>>> tab.aggregate(agg(col('a')).alias("a", "b")).select(col('a'), col('b'))
>>> # take all the columns as inputs
>>> # pd is a Pandas.DataFrame
>>> agg_row = udaf(lambda pd: (pd.a.mean(), pd.b.max()),
... result_type=DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.FLOAT()),
... DataTypes.FIELD("b", DataTypes.INT())]),
... func_type="pandas")
>>> tab.group_by(col('a')).aggregate(agg_row.alias("a", "b")).select(col('a'), col('b'))
:param fields: Expression string.
:return: The result table.
"""
return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env)
class FlatAggregateTable(object):
"""
A table that performs flatAggregate on a :class:`~pyflink.table.Table`, a
:class:`~pyflink.table.GroupedTable` or a :class:`~pyflink.table.WindowGroupedTable`
"""
def __init__(self, java_table, t_env):
self._j_table = java_table
self._t_env = t_env
def select(self, *fields: Expression) -> 'Table':
"""
Performs a selection operation on a FlatAggregateTable. Similar to a SQL SELECT statement.
The field expressions can contain complex expressions.
Example:
::
>>> table_agg = udtaf(MyTableAggregateFunction())
>>> tab.flat_aggregate(table_agg(col('a')).alias("a", "b")).select(col('a'), col('b'))
>>> # take all the columns as inputs
>>> class Top2(TableAggregateFunction):
... def emit_value(self, accumulator):
... yield Row(accumulator[0])
... yield Row(accumulator[1])
...
... def create_accumulator(self):
... return [None, None]
...
... def accumulate(self, accumulator, *args):
... args[0] # type: Row
... if args[0][0] is not None:
... if accumulator[0] is None or args[0][0] > accumulator[0]:
... accumulator[1] = accumulator[0]
... accumulator[0] = args[0][0]
... elif accumulator[1] is None or args[0][0] > accumulator[1]:
... accumulator[1] = args[0][0]
...
... def get_accumulator_type(self):
... return DataTypes.ARRAY(DataTypes.BIGINT())
...
... def get_result_type(self):
... return DataTypes.ROW(
... [DataTypes.FIELD("a", DataTypes.BIGINT())])
>>> top2 = udtaf(Top2())
>>> tab.group_by(col('c')) \\
... .flat_aggregate(top2.alias("a", "b")) \\
... .select(col('a'), col('b'))
:param fields: Expression string.
:return: The result table.
"""
return Table(self._j_table.select(to_expression_jarray(fields)), self._t_env)
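# Hypothetical sketch (not part of the original module): writing a Table to an inline sink
# descriptor, as described in Table.execute_insert above. The "blackhole" connector and the
# single STRING column are assumptions for illustration only.
def _example_execute_insert(table):
    from pyflink.table import DataTypes, Schema
    from pyflink.table.table_descriptor import TableDescriptor

    schema = Schema.new_builder() \
        .column("f0", DataTypes.STRING()) \
        .build()
    return table.execute_insert(
        TableDescriptor.for_connector("blackhole").schema(schema).build())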
| 59,588 | 40.039256 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/data_view.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Iterable, List, Any, Iterator, Dict, Tuple
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
__all__ = ['DataView', 'ListView', 'MapView']
class DataView(ABC):
"""
A DataView is a collection type that can be used in the accumulator of a user-defined
:class:`pyflink.table.AggregateFunction`. Depending on the context in which the function
is used, a DataView can be backed by a normal collection or a state backend.
"""
@abstractmethod
def clear(self) -> None:
"""
Clears the DataView and removes all data.
"""
pass
class ListView(DataView, Generic[T]):
"""
A :class:`DataView` that provides list-like functionality in the accumulator of an
AggregateFunction when large amounts of data are expected.
"""
def __init__(self):
self._list = []
def get(self) -> Iterable[T]:
"""
Returns an iterable of this list view.
"""
return self._list
def add(self, value: T) -> None:
"""
Adds the given value to this list view.
"""
self._list.append(value)
def add_all(self, values: List[T]) -> None:
"""
Adds all of the elements of the specified list to this list view.
"""
self._list.extend(values)
def clear(self) -> None:
self._list = []
def __eq__(self, other: Any) -> bool:
if isinstance(other, ListView):
iter_obj = other.get()
self_iterator = iter(self)
for value in iter_obj:
try:
self_value = next(self_iterator)
except StopIteration:
# this list view is shorter than another one
return False
if self_value != value:
# the elements are not the same.
return False
try:
next(self_iterator)
except StopIteration:
# the length of this list view is the same as another one
return True
else:
# this list view is longer than another one
return False
else:
# the object is not a ListView
return False
def __hash__(self) -> int:
# a plain Python list is not hashable, so hash an immutable snapshot of the elements
return hash(tuple(self._list))
def __iter__(self) -> Iterator[T]:
return iter(self.get())
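# Hypothetical sketch (not part of the original module): outside a state backend a ListView is
# simply backed by a Python list, so it can be exercised directly.
def _example_list_view():
    view = ListView()
    view.add(1)
    view.add_all([2, 3])
    return list(view.get())  # [1, 2, 3]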
class MapView(Generic[K, V]):
"""
A :class:`DataView` that provides dict-like functionality in the accumulator of an
AggregateFunction when large amounts of data are expected.
"""
def __init__(self):
self._dict = dict()
def get(self, key: K) -> V:
"""
Return the value for the specified key.
"""
return self._dict[key]
def put(self, key: K, value: V) -> None:
"""
Inserts a value for the given key into the map view.
If the map view already contains a value for the key, the existing value is overwritten.
"""
self._dict[key] = value
def put_all(self, dict_value: Dict[K, V]) -> None:
"""
Inserts all mappings from the specified map to this map view.
"""
self._dict.update(dict_value)
def remove(self, key: K) -> None:
"""
Deletes the value for the given key.
"""
del self._dict[key]
def contains(self, key: K) -> bool:
"""
Checks if the map view contains a value for a given key.
"""
return key in self._dict
def items(self) -> Iterable[Tuple[K, V]]:
"""
Returns all entries of the map view.
"""
return self._dict.items()
def keys(self) -> Iterable[K]:
"""
Returns all the keys in the map view.
"""
return self._dict.keys()
def values(self) -> Iterable[V]:
"""
Returns all the values in the map view.
"""
return self._dict.values()
def is_empty(self) -> bool:
"""
Returns true if the map view contains no key-value mappings, otherwise false.
"""
return len(self._dict) == 0
def clear(self) -> None:
"""
Removes all entries of this map.
"""
self._dict.clear()
def __eq__(self, other: Any) -> bool:
if other is None:
return False
if other.__class__ == MapView:
return self._dict == other._dict
else:
# comparing the content of state backed map view is too expensive
return other is self
def __getitem__(self, key: K) -> V:
return self.get(key)
def __setitem__(self, key: K, value: V) -> None:
self.put(key, value)
def __delitem__(self, key: K) -> None:
self.remove(key)
def __contains__(self, key: K) -> bool:
return self.contains(key)
def __iter__(self) -> Iterator[K]:
return iter(self.keys())
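# Hypothetical sketch (not part of the original module): locally a MapView behaves like a dict;
# inside an AggregateFunction accumulator the same calls may be served by a state backend.
def _example_map_view():
    view = MapView()
    view['color'] = 'blue'          # __setitem__ delegates to put()
    view.put_all({'size': 'L'})
    keys = sorted(view.keys())      # ['color', 'size']
    return keys, view['color'], view.contains('shape')  # (..., 'blue', False)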
| 5,967 | 29.141414 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/table/functions.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import time
from abc import abstractmethod
from decimal import Decimal
from pyflink.common.constants import MAX_LONG_VALUE, MIN_LONG_VALUE
from pyflink.table import AggregateFunction, MapView, ListView
class AvgAggFunction(AggregateFunction):
def get_value(self, accumulator):
# sum / count
if accumulator[0] != 0:
return accumulator[1] / accumulator[0]
else:
return None
def create_accumulator(self):
# [count, sum]
return [0, 0]
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[0] += 1
accumulator[1] += args[0]
def retract(self, accumulator, *args):
if args[0] is not None:
accumulator[0] -= 1
accumulator[1] -= args[0]
def merge(self, accumulator, accumulators):
for acc in accumulators:
if acc[1] is not None:
accumulator[0] += acc[0]
accumulator[1] += acc[1]
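# Illustrative sketch (not part of the original module): driving the
# create_accumulator/accumulate/retract/get_value protocol of AvgAggFunction
# by hand, outside of any Flink job.
def _avg_agg_function_sketch():
    avg = AvgAggFunction()
    acc = avg.create_accumulator()      # [count, sum] == [0, 0]
    for v in (1, 2, 3, None):           # None values are ignored
        avg.accumulate(acc, v)
    assert avg.get_value(acc) == 2      # (1 + 2 + 3) / 3
    avg.retract(acc, 3)
    assert avg.get_value(acc) == 1.5    # (1 + 2) / 2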
class Count1AggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [0]
def accumulate(self, accumulator, *args):
accumulator[0] += 1
def retract(self, accumulator, *args):
accumulator[0] -= 1
def merge(self, accumulator, accumulators):
for acc in accumulators:
accumulator[0] += acc[0]
class CountAggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [0]
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[0] += 1
def retract(self, accumulator, *args):
if args[0] is not None:
accumulator[0] -= 1
def merge(self, accumulator, accumulators):
for acc in accumulators:
accumulator[0] += acc[0]
class FirstValueAggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
# [first_value]
return [None]
def accumulate(self, accumulator, *args):
if accumulator[0] is None and args[0] is not None:
accumulator[0] = args[0]
def retract(self, accumulator, *args):
raise NotImplementedError("This function does not support retraction.")
def merge(self, accumulator, accumulators):
raise NotImplementedError("This function does not support merge.")
class FirstValueWithRetractAggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
# [first_value, first_order, value_to_order_map, order_to_value_map]
return [None, None, MapView(), MapView()]
def accumulate(self, accumulator, *args):
if args[0] is not None:
value = args[0]
prev_order = accumulator[1]
value_to_order_map = accumulator[2]
order_to_value_map = accumulator[3]
# calculate the order of current value
order = int(round(time.time() * 1000))
if value in value_to_order_map:
order_list = value_to_order_map[value]
else:
order_list = []
order_list.append(order)
value_to_order_map[value] = order_list
if prev_order is None or prev_order > order:
accumulator[0] = value
accumulator[1] = order
if order in order_to_value_map:
value_list = order_to_value_map[order]
else:
value_list = []
value_list.append(value)
order_to_value_map[order] = value_list
def retract(self, accumulator, *args):
if args[0] is not None:
value = args[0]
prev_value = accumulator[0]
prev_order = accumulator[1]
value_to_order_map = accumulator[2]
order_to_value_map = accumulator[3]
# calculate the order of current value
if value in value_to_order_map and value_to_order_map[value]:
order_list = value_to_order_map[value]
else:
# this data has not been accumulated
return
# get and remove current order in value_to_order_map
order = order_list.pop(0)
if order_list:
value_to_order_map[value] = order_list
else:
del value_to_order_map[value]
# remove current value in order_to_value_map
if order in order_to_value_map:
value_list = order_to_value_map[order]
else:
# this data has not been accumulated
return
if value in value_list:
value_list.remove(value)
if value_list:
order_to_value_map[order] = value_list
else:
del order_to_value_map[order]
if value == prev_value:
start_key = prev_order
next_key = MAX_LONG_VALUE
for key in order_to_value_map:
if start_key <= key < next_key:
next_key = key
if next_key != MAX_LONG_VALUE:
accumulator[0] = order_to_value_map[next_key][0]
accumulator[1] = next_key
else:
accumulator[0] = None
accumulator[1] = None
def merge(self, accumulator, accumulators):
raise NotImplementedError("This function does not support merge.")
class LastValueAggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
# [last_value]
return [None]
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[0] = args[0]
def retract(self, accumulator, *args):
raise NotImplementedError("This function does not support retraction.")
def merge(self, accumulator, accumulators):
raise NotImplementedError("This function does not support merge.")
class LastValueWithRetractAggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
        # [last_value, last_order, value_to_order_map, order_to_value_map]
return [None, None, MapView(), MapView()]
def accumulate(self, accumulator, *args):
if args[0] is not None:
value = args[0]
prev_order = accumulator[1]
value_to_order_map = accumulator[2]
order_to_value_map = accumulator[3]
# calculate the order of current value
order = int(time.time() * 1000)
if value in value_to_order_map:
order_list = value_to_order_map[value]
else:
order_list = []
order_list.append(order)
value_to_order_map[value] = order_list
if prev_order is None or prev_order <= order:
accumulator[0] = value
accumulator[1] = order
if order in order_to_value_map:
value_list = order_to_value_map[order]
else:
value_list = []
value_list.append(value)
order_to_value_map[order] = value_list
def retract(self, accumulator, *args):
if args[0] is not None:
value = args[0]
prev_value = accumulator[0]
prev_order = accumulator[1]
value_to_order_map = accumulator[2]
order_to_value_map = accumulator[3]
# calculate the order of current value
if value in value_to_order_map and value_to_order_map[value]:
order_list = value_to_order_map[value]
else:
# this data has not been accumulated
return
# get and remove current order in value_to_order_map
order = order_list.pop(0)
if order_list:
value_to_order_map[value] = order_list
else:
del value_to_order_map[value]
if order in order_to_value_map:
value_list = order_to_value_map[order]
else:
return
if value in value_list:
value_list.remove(value)
if value_list:
order_to_value_map[order] = value_list
else:
del order_to_value_map[order]
if value == prev_value:
start_key = prev_order
next_key = MIN_LONG_VALUE
for key in order_to_value_map:
if start_key >= key > next_key:
next_key = key
if next_key != MIN_LONG_VALUE:
values = order_to_value_map[next_key]
accumulator[0] = values[len(values) - 1]
accumulator[1] = next_key
else:
accumulator[0] = None
accumulator[1] = None
def merge(self, accumulator, accumulators):
raise NotImplementedError("This function does not support merge.")
class ListAggFunction(AggregateFunction):
def get_value(self, accumulator):
if accumulator[1]:
return accumulator[0].join(accumulator[1])
else:
return None
def create_accumulator(self):
# delimiter, values
return [',', []]
def accumulate(self, accumulator, *args):
if args[0] is not None:
if len(args) > 1:
accumulator[0] = args[1]
accumulator[1].append(args[0])
def retract(self, accumulator, *args):
raise NotImplementedError("This function does not support retraction.")
class ListAggWithRetractAggFunction(AggregateFunction):
def get_value(self, accumulator):
values = [i for i in accumulator[0]]
if values:
return ','.join(values)
else:
return None
def create_accumulator(self):
# [list, retract_list]
return [ListView(), ListView()]
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[0].add(args[0])
def retract(self, accumulator, *args):
if args[0] is not None:
values = [i for i in accumulator[0]]
try:
values.remove(args[0])
accumulator[0].clear()
accumulator[0].add_all(values)
except ValueError:
accumulator[1].add(args[0])
def merge(self, accumulator, accumulators):
for acc in accumulators:
buffer = [e for e in acc[0]]
retract_buffer = [e for e in acc[1]]
if buffer or retract_buffer:
for e in accumulator[0]:
buffer.append(e)
for e in accumulator[1]:
retract_buffer.append(e)
# merge list & retract list
new_retract_buffer = []
for e in retract_buffer:
if e in buffer:
buffer.remove(e)
else:
new_retract_buffer.append(e)
accumulator[0].clear()
accumulator[0].add_all(buffer)
accumulator[1].clear()
accumulator[1].add_all(new_retract_buffer)
class ListAggWsWithRetractAggFunction(AggregateFunction):
def get_value(self, accumulator):
values = [i for i in accumulator[0]]
if values:
return accumulator[2].join(values)
else:
return None
def create_accumulator(self):
# [list, retract_list, delimiter]
return [ListView(), ListView(), ',']
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[2] = args[1]
accumulator[0].add(args[0])
def retract(self, accumulator, *args):
if args[0] is not None:
accumulator[2] = args[1]
values = [i for i in accumulator[0]]
if args[0] in values:
values.remove(args[0])
accumulator[0].clear()
accumulator[0].add_all(values)
else:
accumulator[1].add(args[0])
def merge(self, accumulator, accumulators):
for acc in accumulators:
buffer = [e for e in acc[0]]
retract_buffer = [e for e in acc[1]]
if buffer or retract_buffer:
accumulator[2] = acc[2]
for e in accumulator[0]:
buffer.append(e)
for e in accumulator[1]:
retract_buffer.append(e)
# merge list & retract list
new_retract_buffer = []
for e in retract_buffer:
if e in buffer:
buffer.remove(e)
else:
new_retract_buffer.append(e)
accumulator[0].clear()
accumulator[0].add_all(buffer)
accumulator[1].clear()
                accumulator[1].add_all(new_retract_buffer)
class MaxAggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [None]
def accumulate(self, accumulator, *args):
if args[0] is not None:
if accumulator[0] is None or args[0] > accumulator[0]:
accumulator[0] = args[0]
def retract(self, accumulator, *args):
raise NotImplementedError("This function does not support retraction.")
def merge(self, accumulator, accumulators):
for acc in accumulators:
if acc[0] is not None:
if accumulator[0] is None or acc[0] > accumulator[0]:
accumulator[0] = acc[0]
class MaxWithRetractAggFunction(AggregateFunction):
def get_value(self, accumulator):
if accumulator[1] > 0:
return accumulator[0]
else:
return None
def create_accumulator(self):
# [max, map_size, value_to_count_map]
return [None, 0, MapView()]
def accumulate(self, accumulator, *args):
if args[0] is not None:
value = args[0]
if accumulator[1] == 0 or accumulator[0] < value:
accumulator[0] = value
if value in accumulator[2]:
count = accumulator[2][value]
else:
count = 0
count += 1
if count == 0:
del accumulator[2][value]
else:
accumulator[2][value] = count
if count == 1:
accumulator[1] += 1
def retract(self, accumulator, *args):
if args[0] is not None:
value = args[0]
if value in accumulator[2]:
count = accumulator[2][value]
else:
count = 0
count -= 1
if count == 0:
del accumulator[2][value]
accumulator[1] -= 1
if accumulator[1] == 0:
accumulator[0] = None
return
if value == accumulator[0]:
self.update_max(accumulator)
@staticmethod
def update_max(acc):
has_max = False
for value in acc[2]:
if not has_max or acc[0] < value:
acc[0] = value
has_max = True
# The behavior of deleting expired data in the state backend is uncertain.
# so `mapSize` data may exist, while `map` data may have been deleted
# when both of them are expired.
if not has_max:
acc[0] = None
# we should also override max value, because it may have an old value.
acc[1] = 0
def merge(self, acc, accumulators):
need_update_max = False
for a in accumulators:
# set max element
if acc[1] == 0 or (a[1] > 0 and a[0] is not None and acc[0] < a[0]):
acc[0] = a[0]
# merge the count for each key
for value, count in a[2].items():
if value in acc[2]:
this_count = acc[2][value]
else:
this_count = 0
merged_count = count + this_count
if merged_count == 0:
# remove it when count is increased from -1 to 0
del acc[2][value]
# origin is > 0, and retract to 0
if this_count > 0:
acc[1] -= 1
if value == acc[0]:
need_update_max = True
elif merged_count < 0:
acc[2][value] = merged_count
if this_count > 0:
# origin is > 0, and retract to < 0
acc[1] -= 1
if value == acc[0]:
need_update_max = True
else: # merged_count > 0
acc[2][value] = merged_count
if this_count <= 0:
# origin is <= 0, and accumulate to > 0
acc[1] += 1
if need_update_max:
self.update_max(acc)
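# Illustrative sketch (not part of the original module): MaxWithRetractAggFunction
# keeps a value -> count MapView so that retracting the current maximum can fall
# back to the largest remaining value.
def _max_with_retract_sketch():
    f = MaxWithRetractAggFunction()
    acc = f.create_accumulator()
    for v in (3, 7, 5):
        f.accumulate(acc, v)
    assert f.get_value(acc) == 7
    f.retract(acc, 7)                   # retracting the current maximum ...
    assert f.get_value(acc) == 5        # ... falls back to the next largest value
    f.retract(acc, 5)
    f.retract(acc, 3)
    assert f.get_value(acc) is None     # every accumulated row has been retracted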
class MinAggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
# [min]
return [None]
def accumulate(self, accumulator, *args):
if args[0] is not None:
if accumulator[0] is None or accumulator[0] > args[0]:
accumulator[0] = args[0]
def retract(self, accumulator, *args):
raise NotImplementedError("This function does not support retraction.")
def merge(self, accumulator, accumulators):
for acc in accumulators:
if acc[0] is not None:
if accumulator[0] is None or accumulator[0] > acc[0]:
accumulator[0] = acc[0]
class MinWithRetractAggFunction(AggregateFunction):
def get_value(self, accumulator):
if accumulator[1] > 0:
return accumulator[0]
else:
return None
def create_accumulator(self):
# [min, map_size, value_to_count_map]
return [None, 0, MapView()]
def accumulate(self, accumulator, *args):
if args[0] is not None:
value = args[0]
if accumulator[1] == 0 or accumulator[0] > value:
accumulator[0] = value
if value in accumulator[2]:
count = accumulator[2][value]
else:
count = 0
count += 1
if count == 0:
del accumulator[2][value]
else:
accumulator[2][value] = count
if count == 1:
accumulator[1] += 1
def retract(self, accumulator, *args):
if args[0] is not None:
value = args[0]
if value in accumulator[2]:
count = accumulator[2][value]
else:
count = 0
count -= 1
if count == 0:
del accumulator[2][value]
accumulator[1] -= 1
if accumulator[1] == 0:
accumulator[0] = None
return
if value == accumulator[0]:
self.update_min(accumulator)
@staticmethod
    def update_min(acc):
        has_min = False
        for value in acc[2]:
            if not has_min or acc[0] > value:
                acc[0] = value
                has_min = True
        # The behavior of deleting expired data in the state backend is uncertain.
        # so `mapSize` data may exist, while `map` data may have been deleted
        # when both of them are expired.
        if not has_min:
            acc[0] = None
            # we should also override min value, because it may have an old value.
            acc[1] = 0
def merge(self, acc, accumulators):
need_update_min = False
for a in accumulators:
# set min element
if acc[1] == 0 or (a[1] > 0 and a[0] is not None and acc[0] > a[0]):
acc[0] = a[0]
# merge the count for each key
for value, count in a[2].items():
if value in acc[2]:
this_count = acc[2][value]
else:
this_count = 0
merged_count = count + this_count
if merged_count == 0:
# remove it when count is increased from -1 to 0
del acc[2][value]
# origin is > 0, and retract to 0
if this_count > 0:
acc[1] -= 1
if value == acc[0]:
need_update_min = True
elif merged_count < 0:
acc[2][value] = merged_count
if this_count > 0:
# origin is > 0, and retract to < 0
acc[1] -= 1
if value == acc[0]:
need_update_min = True
else: # merged_count > 0
acc[2][value] = merged_count
if this_count <= 0:
# origin is <= 0, and accumulate to > 0
acc[1] += 1
if need_update_min:
self.update_min(acc)
class Sum0AggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
@abstractmethod
def create_accumulator(self):
pass
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[0] += args[0]
def retract(self, accumulator, *args):
if args[0] is not None:
accumulator[0] -= args[0]
def merge(self, accumulator, accumulators):
for acc in accumulators:
accumulator[0] += acc[0]
class IntSum0AggFunction(Sum0AggFunction):
def create_accumulator(self):
# [sum]
return [0]
class FloatSum0AggFunction(Sum0AggFunction):
def create_accumulator(self):
# [sum]
return [0.0]
class DecimalSum0AggFunction(Sum0AggFunction):
def create_accumulator(self):
# [sum]
return [Decimal('0')]
class SumAggFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
# [sum]
return [None]
def accumulate(self, accumulator, *args):
if args[0] is not None:
if accumulator[0] is None:
accumulator[0] = args[0]
else:
accumulator[0] += args[0]
def retract(self, accumulator, *args):
raise NotImplementedError("This function does not support retraction.")
def merge(self, accumulator, accumulators):
for acc in accumulators:
if acc[0] is not None:
if accumulator[0] is None:
accumulator[0] = acc[0]
else:
accumulator[0] += acc[0]
class SumWithRetractAggFunction(AggregateFunction):
def get_value(self, accumulator):
if accumulator[1] == 0:
return None
else:
return accumulator[0]
def create_accumulator(self):
# [sum, count]
return [0, 0]
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[0] += args[0]
accumulator[1] += 1
def retract(self, accumulator, *args):
if args[0] is not None:
accumulator[0] -= args[0]
accumulator[1] -= 1
def merge(self, accumulator, accumulators):
for acc in accumulators:
if acc[0] is not None:
accumulator[0] += acc[0]
accumulator[1] += acc[1]
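# Illustrative sketch (not part of the original module): SumWithRetractAggFunction
# tracks a [sum, count] pair so that the result becomes None again once every
# accumulated row has been retracted.
def _sum_with_retract_sketch():
    f = SumWithRetractAggFunction()
    acc = f.create_accumulator()        # [sum, count] == [0, 0]
    f.accumulate(acc, 10)
    f.accumulate(acc, 5)
    assert f.get_value(acc) == 15
    f.retract(acc, 10)
    assert f.get_value(acc) == 5
    f.retract(acc, 5)
    assert f.get_value(acc) is None     # count dropped back to 0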
| 25,335 | 30.78921 | 82 |
py
|
flink
|
flink-master/flink-python/pyflink/table/table_descriptor.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Dict, Union, List, Optional
from pyflink.common.config_options import ConfigOption
from pyflink.java_gateway import get_gateway
from pyflink.table.schema import Schema
from pyflink.util.java_utils import to_jarray
__all__ = ['TableDescriptor', 'FormatDescriptor']
class TableDescriptor(object):
"""
Describes a CatalogTable representing a source or sink.
TableDescriptor is a template for creating a CatalogTable instance. It closely resembles the
"CREATE TABLE" SQL DDL statement, containing schema, connector options, and other
characteristics. Since tables in Flink are typically backed by external systems, the
descriptor describes how a connector (and possibly its format) are configured.
This can be used to register a table in the Table API, see :func:`create_temporary_table` in
TableEnvironment.
"""
def __init__(self, j_table_descriptor):
self._j_table_descriptor = j_table_descriptor
@staticmethod
def for_connector(connector: str) -> 'TableDescriptor.Builder':
"""
Creates a new :class:`~pyflink.table.TableDescriptor.Builder` for a table using the given
connector.
:param connector: The factory identifier for the connector.
"""
gateway = get_gateway()
j_builder = gateway.jvm.TableDescriptor.forConnector(connector)
return TableDescriptor.Builder(j_builder)
def get_schema(self) -> Optional[Schema]:
j_schema = self._j_table_descriptor.getSchema()
if j_schema.isPresent():
return Schema(j_schema.get())
else:
return None
def get_options(self) -> Dict[str, str]:
return self._j_table_descriptor.getOptions()
def get_partition_keys(self) -> List[str]:
return self._j_table_descriptor.getPartitionKeys()
def get_comment(self) -> Optional[str]:
j_comment = self._j_table_descriptor.getComment()
if j_comment.isPresent():
return j_comment.get()
else:
return None
def __str__(self):
return self._j_table_descriptor.toString()
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self._j_table_descriptor.equals(other._j_table_descriptor))
def __hash__(self):
return self._j_table_descriptor.hashCode()
class Builder(object):
"""
Builder for TableDescriptor.
"""
def __init__(self, j_builder):
self._j_builder = j_builder
def schema(self, schema: Schema) -> 'TableDescriptor.Builder':
"""
Define the schema of the TableDescriptor.
"""
self._j_builder.schema(schema._j_schema)
return self
def option(self, key: Union[str, ConfigOption], value) -> 'TableDescriptor.Builder':
"""
Sets the given option on the table.
Option keys must be fully specified. When defining options for a Format, use
format(FormatDescriptor) instead.
Example:
::
>>> TableDescriptor.for_connector("kafka") \
... .option("scan.startup.mode", "latest-offset") \
... .build()
"""
if isinstance(key, str):
self._j_builder.option(key, value)
else:
self._j_builder.option(key._j_config_option, value)
return self
def format(self,
format: Union[str, 'FormatDescriptor'],
format_option: ConfigOption[str] = None) -> 'TableDescriptor.Builder':
"""
Defines the format to be used for this table.
Note that not every connector requires a format to be specified, while others may use
multiple formats.
Example:
::
>>> TableDescriptor.for_connector("kafka") \
... .format(FormatDescriptor.for_format("json")
... .option("ignore-parse-errors", "true")
... .build())
will result in the options:
'format' = 'json'
'json.ignore-parse-errors' = 'true'
"""
if format_option is None:
if isinstance(format, str):
self._j_builder.format(format)
else:
self._j_builder.format(format._j_format_descriptor)
else:
if isinstance(format, str):
self._j_builder.format(format_option._j_config_option, format)
else:
self._j_builder.format(
format_option._j_config_option, format._j_format_descriptor)
return self
def partitioned_by(self, *partition_keys: str) -> 'TableDescriptor.Builder':
"""
Define which columns this table is partitioned by.
"""
gateway = get_gateway()
self._j_builder.partitionedBy(to_jarray(gateway.jvm.java.lang.String, partition_keys))
return self
def comment(self, comment: str) -> 'TableDescriptor.Builder':
"""
Define the comment for this table.
"""
self._j_builder.comment(comment)
return self
def build(self) -> 'TableDescriptor':
"""
Returns an immutable instance of :class:`~pyflink.table.TableDescriptor`.
"""
return TableDescriptor(self._j_builder.build())
class FormatDescriptor(object):
"""
Describes a Format and its options for use with :class:`~pyflink.table.TableDescriptor`.
Formats are responsible for encoding and decoding data in table connectors. Note that not
every connector has a format, while others may have multiple formats (e.g. the Kafka connector
has separate formats for keys and values). Common formats are "json", "csv", "avro", etc.
"""
def __init__(self, j_format_descriptor):
self._j_format_descriptor = j_format_descriptor
@staticmethod
def for_format(format: str) -> 'FormatDescriptor.Builder':
"""
Creates a new :class:`~pyflink.table.FormatDescriptor.Builder` describing a format with the
given format identifier.
:param format: The factory identifier for the format.
"""
gateway = get_gateway()
j_builder = gateway.jvm.FormatDescriptor.forFormat(format)
return FormatDescriptor.Builder(j_builder)
def get_format(self) -> str:
return self._j_format_descriptor.getFormat()
def get_options(self) -> Dict[str, str]:
return self._j_format_descriptor.getOptions()
def __str__(self):
return self._j_format_descriptor.toString()
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self._j_format_descriptor.equals(other._j_format_descriptor))
def __hash__(self):
return self._j_format_descriptor.hashCode()
class Builder(object):
"""
Builder for FormatDescriptor.
"""
def __init__(self, j_builder):
self._j_builder = j_builder
def option(self, key: Union[str, ConfigOption], value) -> 'FormatDescriptor.Builder':
"""
Sets the given option on the format.
Note that format options must not be prefixed with the format identifier itself here.
Example:
::
>>> FormatDescriptor.for_format("json") \
... .option("ignore-parse-errors", "true") \
... .build()
will automatically be converted into its prefixed form:
'format' = 'json'
'json.ignore-parse-errors' = 'true'
"""
if isinstance(key, str):
self._j_builder.option(key, value)
else:
self._j_builder.option(key._j_config_option, value)
return self
def build(self) -> 'FormatDescriptor':
"""
Returns an immutable instance of :class:`~pyflink.table.FormatDescriptor`.
"""
return FormatDescriptor(self._j_builder.build())
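# Illustrative sketch (not part of the original module): building a descriptor for
# a hypothetical "kafka" connector with a JSON format, mirroring the docstring
# examples above. Connector, option and column names are examples only.
def _table_descriptor_sketch():
    from pyflink.table.types import DataTypes
    return (
        TableDescriptor.for_connector("kafka")
        .schema(Schema.new_builder()
                .column("id", DataTypes.BIGINT())
                .column("name", DataTypes.STRING())
                .build())
        .option("topic", "orders")
        .format(FormatDescriptor.for_format("json")
                .option("ignore-parse-errors", "true")
                .build())
        .build())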
| 9,313 | 35.100775 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/table/statement_set.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Union
from pyflink.java_gateway import get_gateway
from pyflink.table import ExplainDetail
from pyflink.table.table_descriptor import TableDescriptor
from pyflink.table.table_result import TableResult
from pyflink.util.java_utils import to_j_explain_detail_arr
__all__ = ['StatementSet']
class StatementSet(object):
"""
A :class:`~StatementSet` accepts pipelines defined by DML statements or :class:`~Table` objects.
The planner can optimize all added statements together and then submit them as one job.
The added statements will be cleared when calling the :func:`~StatementSet.execute` method.
.. versionadded:: 1.11.0
"""
def __init__(self, _j_statement_set, t_env):
self._j_statement_set = _j_statement_set
self._t_env = t_env
def add_insert_sql(self, stmt: str) -> 'StatementSet':
"""
add insert statement to the set.
:param stmt: The statement to be added.
:return: current StatementSet instance.
.. versionadded:: 1.11.0
"""
self._j_statement_set.addInsertSql(stmt)
return self
def attach_as_datastream(self):
"""
Optimizes all statements as one entity and adds them as transformations to the underlying
StreamExecutionEnvironment.
Use :func:`~pyflink.datastream.StreamExecutionEnvironment.execute` to execute them.
The added statements will be cleared after calling this method.
.. versionadded:: 1.16.0
"""
self._j_statement_set.attachAsDataStream()
def add_insert(self,
target_path_or_descriptor: Union[str, TableDescriptor],
table,
overwrite: bool = False) -> 'StatementSet':
"""
Adds a statement that the pipeline defined by the given Table object should be written to a
table (backed by a DynamicTableSink) that was registered under the specified path or
expressed via the given TableDescriptor.
        1. When target_path_or_descriptor is a table path:
See the documentation of :func:`~TableEnvironment.use_database` or
:func:`~TableEnvironment.use_catalog` for the rules on the path resolution.
2. When target_path_or_descriptor is a table descriptor:
The given TableDescriptor is registered as an inline (i.e. anonymous) temporary catalog
table (see :func:`~TableEnvironment.create_temporary_table`).
Then a statement is added to the statement set that inserts the Table object's pipeline
into that temporary table.
        This method allows declaring a Schema for the sink descriptor. The declaration is
        similar to a ``CREATE TABLE`` DDL in SQL and makes it possible to:
1. overwrite automatically derived columns with a custom DataType
2. add metadata columns next to the physical columns
3. declare a primary key
It is possible to declare a schema without physical/regular columns. In this case, those
columns will be automatically derived and implicitly put at the beginning of the schema
declaration.
Examples:
::
>>> stmt_set = table_env.create_statement_set()
>>> source_table = table_env.from_path("SourceTable")
>>> sink_descriptor = TableDescriptor.for_connector("blackhole") \\
... .schema(Schema.new_builder()
... .build()) \\
... .build()
>>> stmt_set.add_insert(sink_descriptor, source_table)
        .. note:: add_insert for a table descriptor (case 2.) was added in
flink 1.14.0.
        :param target_path_or_descriptor: The path of the registered
            :class:`~pyflink.table.TableSink` or the descriptor describing the sink table into
            which the :class:`~pyflink.table.Table` is written.
:param table: The Table to add.
:type table: pyflink.table.Table
:param overwrite: Indicates whether the insert should overwrite existing data or not.
:return: current StatementSet instance.
.. versionadded:: 1.11.0
"""
if isinstance(target_path_or_descriptor, str):
self._j_statement_set.addInsert(target_path_or_descriptor, table._j_table, overwrite)
else:
self._j_statement_set.addInsert(
target_path_or_descriptor._j_table_descriptor, table._j_table, overwrite)
return self
def explain(self, *extra_details: ExplainDetail) -> str:
"""
returns the AST and the execution plan of all statements and Tables.
:param extra_details: The extra explain details which the explain result should include,
e.g. estimated cost, changelog mode for streaming
        :return: The AST and the execution plan of all statements and Tables.
.. versionadded:: 1.11.0
"""
TEXT = get_gateway().jvm.org.apache.flink.table.api.ExplainFormat.TEXT
j_extra_details = to_j_explain_detail_arr(extra_details)
return self._j_statement_set.explain(TEXT, j_extra_details)
def execute(self) -> TableResult:
"""
execute all statements and Tables as a batch.
.. note::
The added statements and Tables will be cleared when executing this method.
:return: execution result.
.. versionadded:: 1.11.0
"""
self._t_env._before_execute()
return TableResult(self._j_statement_set.execute())
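# Illustrative sketch (not part of the original module): the typical lifecycle of a
# StatementSet. "MySource" and "MySink" are hypothetical tables that must already
# be registered on the given TableEnvironment.
def _statement_set_sketch(table_env):
    stmt_set = table_env.create_statement_set()
    stmt_set.add_insert_sql("INSERT INTO MySink SELECT * FROM MySource")
    stmt_set.add_insert("MySink", table_env.from_path("MySource"))
    print(stmt_set.explain())
    # submits both pipelines as a single job and clears the set
    return stmt_set.execute()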
| 6,668 | 40.943396 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/utils.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import ast
from pyflink.common.types import RowKind
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, LocalZonedTimestampType, Row, RowType, \
TimeType, DateType, ArrayType, MapType, TimestampType, FloatType, RawType
from pyflink.util.java_utils import to_jarray
import datetime
import pickle
def pandas_to_arrow(schema, timezone, field_types, series):
import pyarrow as pa
import pandas as pd
def create_array(s, t):
try:
return pa.Array.from_pandas(s, mask=s.isnull(), type=t)
except pa.ArrowException as e:
error_msg = "Exception thrown when converting pandas.Series (%s) to " \
"pyarrow.Array (%s)."
raise RuntimeError(error_msg % (s.dtype, t), e)
arrays = []
for i in range(len(schema)):
s = series[i]
field_type = field_types[i]
schema_type = schema.types[i]
if type(s) == pd.DataFrame:
array_names = [(create_array(s[s.columns[j]], field.type), field.name)
for j, field in enumerate(schema_type)]
struct_arrays, struct_names = zip(*array_names)
arrays.append(pa.StructArray.from_arrays(struct_arrays, struct_names))
else:
arrays.append(create_array(
tz_convert_to_internal(s, field_type, timezone), schema_type))
return pa.RecordBatch.from_arrays(arrays, schema=schema)
def arrow_to_pandas(timezone, field_types, batches):
def arrow_column_to_pandas(arrow_column, t: DataType):
if type(t) == RowType:
import pandas as pd
series = [column.to_pandas(date_as_object=True).rename(field.name)
for column, field in zip(arrow_column.flatten(), arrow_column.type)]
return pd.concat(series, axis=1)
else:
return arrow_column.to_pandas(date_as_object=True)
import pyarrow as pa
table = pa.Table.from_batches(batches)
return [tz_convert_from_internal(arrow_column_to_pandas(c, t), t, timezone)
for c, t in zip(table.itercolumns(), field_types)]
def tz_convert_from_internal(s, t: DataType, local_tz):
"""
Converts the timestamp series from internal according to the specified local timezone.
Returns the same series if the series is not a timestamp series. Otherwise,
returns a converted series.
"""
if type(t) == LocalZonedTimestampType:
return s.dt.tz_localize(local_tz)
else:
return s
def tz_convert_to_internal(s, t: DataType, local_tz):
"""
Converts the timestamp series to internal according to the specified local timezone.
"""
if type(t) == LocalZonedTimestampType:
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype
if is_datetime64_dtype(s.dtype):
return s.dt.tz_localize(None)
elif is_datetime64tz_dtype(s.dtype):
return s.dt.tz_convert(local_tz).dt.tz_localize(None)
return s
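# Illustrative sketch (not part of the original module): round-tripping a
# TIMESTAMP_LTZ series through the internal (timezone-naive) representation,
# assuming pandas is installed and "Asia/Shanghai" as the session time zone.
def _tz_convert_sketch():
    import pandas as pd
    t = LocalZonedTimestampType()
    s = pd.Series(pd.to_datetime(["2023-01-01 08:00:00"])).dt.tz_localize("Asia/Shanghai")
    internal = tz_convert_to_internal(s, t, "Asia/Shanghai")       # tz-naive local times
    restored = tz_convert_from_internal(internal, t, "Asia/Shanghai")
    assert restored.iloc[0] == s.iloc[0]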
def to_expression_jarray(exprs):
"""
Convert python list of Expression to java array of Expression.
"""
gateway = get_gateway()
return to_jarray(gateway.jvm.Expression, [expr._j_expr for expr in exprs])
def pickled_bytes_to_python_converter(data, field_type: DataType):
if isinstance(field_type, RowType):
row_kind = RowKind(int.from_bytes(data[0], byteorder='big', signed=False))
data = zip(list(data[1:]), field_type.field_types())
fields = []
for d, d_type in data:
fields.append(pickled_bytes_to_python_converter(d, d_type))
result_row = Row(fields)
result_row.set_row_kind(row_kind)
return result_row
else:
data = pickle.loads(data)
if isinstance(field_type, TimeType):
seconds, microseconds = divmod(data, 10 ** 6)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, microseconds)
elif isinstance(field_type, DateType):
return field_type.from_sql_type(data)
elif isinstance(field_type, TimestampType):
return field_type.from_sql_type(int(data.timestamp() * 10**6))
elif isinstance(field_type, MapType):
key_type = field_type.key_type
value_type = field_type.value_type
zip_kv = zip(data[0], data[1])
return dict((pickled_bytes_to_python_converter(k, key_type),
pickled_bytes_to_python_converter(v, value_type))
for k, v in zip_kv)
elif isinstance(field_type, FloatType):
return field_type.from_sql_type(ast.literal_eval(data))
elif isinstance(field_type, ArrayType):
element_type = field_type.element_type
elements = []
for element_bytes in data:
elements.append(pickled_bytes_to_python_converter(element_bytes, element_type))
return elements
elif isinstance(field_type, RawType):
return field_type.from_sql_type(data)
else:
return field_type.from_sql_type(data)
| 6,154 | 40.870748 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/table/table_config.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
from py4j.compat import long
from pyflink.common.configuration import Configuration
from pyflink.java_gateway import get_gateway
from pyflink.table.sql_dialect import SqlDialect
__all__ = ['TableConfig']
from pyflink.util.java_utils import add_jars_to_context_class_loader
class TableConfig(object):
"""
Configuration for the current :class:`TableEnvironment` session to adjust Table & SQL API
programs.
This class is a pure API class that abstracts configuration from various sources. Currently,
configuration can be set in any of the following layers (in the given order):
- flink-conf.yaml
- CLI parameters
- :class:`~pyflink.datastream.StreamExecutionEnvironment` when bridging to DataStream API
- :func:`~EnvironmentSettings.Builder.with_configuration`
- :func:`~TableConfig.set`
The latter two represent the application-specific part of the configuration. They initialize
and directly modify :func:`~TableConfig.get_configuration`. Other layers represent the
configuration of the execution context and are immutable.
The getter :func:`~TableConfig.get` gives read-only access to the full configuration. However,
application-specific configuration has precedence. Configuration of outer layers is used for
defaults and fallbacks. The setter :func:`~TableConfig.set` will only affect
application-specific configuration.
For common or important configuration options, this class provides getters and setters methods
with detailed inline documentation.
For more advanced configuration, users can directly access the underlying key-value map via
:func:`~pyflink.table.TableConfig.get_configuration`.
Example:
::
>>> table_config = t_env.get_config()
>>> config = Configuration()
>>> config.set_string("parallelism.default", "128") \\
... .set_string("pipeline.auto-watermark-interval", "800ms") \\
... .set_string("execution.checkpointing.interval", "30s")
>>> table_config.add_configuration(config)
.. note::
        Because options are read at different points in time when performing operations, it is
recommended to set configuration options early after instantiating a table environment.
"""
def __init__(self, j_table_config=None):
gateway = get_gateway()
if j_table_config is None:
self._j_table_config = gateway.jvm.TableConfig.getDefault()
else:
self._j_table_config = j_table_config
def get(self, key: str, default_value: str) -> str:
"""
Returns the value associated with the given key as a string.
:param key: The key pointing to the associated value.
:param default_value: The default value which is returned in case there is no value
associated with the given key.
:return: The (default) value associated with the given key.
"""
if self.get_configuration().contains_key(key):
return self.get_configuration().get_string(key, default_value)
else:
return self._j_table_config.getRootConfiguration().getString(key, default_value)
def set(self, key: str, value: str) -> 'TableConfig':
"""
Sets a string-based value for the given string-based key.
The value will be parsed by the framework on access.
"""
self._j_table_config.set(key, value)
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
if key in [jars_key, classpaths_key]:
add_jars_to_context_class_loader(value.split(";"))
return self
def get_local_timezone(self) -> str:
"""
Returns the local timezone id for timestamp with local time zone, either an abbreviation
such as "PST", a full name such as "America/Los_Angeles", or a custom timezone_id such
as "GMT-08:00".
"""
return self._j_table_config.getLocalTimeZone().getId()
def set_local_timezone(self, timezone_id: str):
"""
Sets the local timezone id for timestamp with local time zone.
:param timezone_id: The timezone id, either an abbreviation such as "PST", a full name
such as "America/Los_Angeles", or a custom timezone_id such as
"GMT-08:00".
"""
if timezone_id is not None and isinstance(timezone_id, str):
j_timezone = get_gateway().jvm.java.time.ZoneId.of(timezone_id)
self._j_table_config.setLocalTimeZone(j_timezone)
else:
raise Exception("TableConfig.timezone should be a string!")
def get_max_generated_code_length(self) -> int:
"""
The current threshold where generated code will be split into sub-function calls. Java has
a maximum method length of 64 KB. This setting allows for finer granularity if necessary.
Default is 64000.
"""
return self._j_table_config.getMaxGeneratedCodeLength()
def set_max_generated_code_length(self, max_generated_code_length: int):
"""
        Sets the current threshold where generated code will be split into sub-function calls.
Java has a maximum method length of 64 KB. This setting allows for finer granularity if
necessary. Default is 64000.
"""
if max_generated_code_length is not None and isinstance(max_generated_code_length, int):
self._j_table_config.setMaxGeneratedCodeLength(max_generated_code_length)
else:
raise Exception("TableConfig.max_generated_code_length should be a int value!")
def set_idle_state_retention_time(self,
min_time: datetime.timedelta,
max_time: datetime.timedelta):
"""
Specifies a minimum and a maximum time interval for how long idle state, i.e., state which
was not updated, will be retained.
State will never be cleared until it was idle for less than the minimum time and will never
be kept if it was idle for more than the maximum time.
When new data arrives for previously cleaned-up state, the new data will be handled as if it
was the first data. This can result in previous results being overwritten.
Set to 0 (zero) to never clean-up the state.
Example:
::
>>> table_config = TableConfig() \\
... .set_idle_state_retention_time(datetime.timedelta(days=1),
... datetime.timedelta(days=3))
.. note::
Cleaning up state requires additional bookkeeping which becomes less expensive for
larger differences of minTime and maxTime. The difference between minTime and maxTime
must be at least 5 minutes.
Method set_idle_state_retention_time is deprecated now. The suggested way to set idle
state retention time is :func:`~pyflink.table.TableConfig.set_idle_state_retention`
Currently, setting max_time will not work and the max_time is directly derived from the
min_time as 1.5 x min_time.
:param min_time: The minimum time interval for which idle state is retained. Set to
0 (zero) to never clean-up the state.
:param max_time: The maximum time interval for which idle state is retained. Must be at
least 5 minutes greater than minTime. Set to
0 (zero) to never clean-up the state.
"""
j_time_class = get_gateway().jvm.org.apache.flink.api.common.time.Time
j_min_time = j_time_class.milliseconds(long(round(min_time.total_seconds() * 1000)))
j_max_time = j_time_class.milliseconds(long(round(max_time.total_seconds() * 1000)))
self._j_table_config.setIdleStateRetentionTime(j_min_time, j_max_time)
def set_idle_state_retention(self, duration: datetime.timedelta):
"""
Specifies a retention time interval for how long idle state, i.e., state which
was not updated, will be retained.
State will never be cleared until it was idle for less than the duration and will never
be kept if it was idle for more than the 1.5 x duration.
When new data arrives for previously cleaned-up state, the new data will be handled as if it
was the first data. This can result in previous results being overwritten.
Set to 0 (zero) to never clean-up the state.
Example:
::
>>> table_config.set_idle_state_retention(datetime.timedelta(days=1))
.. note::
Cleaning up state requires additional bookkeeping which becomes less expensive for
larger differences of minTime and maxTime. The difference between minTime and maxTime
must be at least 5 minutes.
:param duration: The retention time interval for which idle state is retained. Set to
0 (zero) to never clean-up the state.
"""
j_duration_class = get_gateway().jvm.java.time.Duration
j_duration = j_duration_class.ofMillis(long(round(duration.total_seconds() * 1000)))
self._j_table_config.setIdleStateRetention(j_duration)
def get_min_idle_state_retention_time(self) -> int:
"""
State might be cleared and removed if it was not updated for the defined period of time.
.. note::
Currently the concept of min/max idle state retention has been deprecated and only
idle state retention time is supported. The min idle state retention is regarded as
idle state retention and the max idle state retention is derived from idle state
retention as 1.5 x idle state retention.
:return: The minimum time until state which was not updated will be retained.
"""
return self._j_table_config.getMinIdleStateRetentionTime()
def get_max_idle_state_retention_time(self) -> int:
"""
State will be cleared and removed if it was not updated for the defined period of time.
.. note::
Currently the concept of min/max idle state retention has been deprecated and only
idle state retention time is supported. The min idle state retention is regarded as
idle state retention and the max idle state retention is derived from idle state
retention as 1.5 x idle state retention.
:return: The maximum time until state which was not updated will be retained.
"""
return self._j_table_config.getMaxIdleStateRetentionTime()
def get_idle_state_retention(self) -> datetime.timedelta:
"""
:return: The duration until state which was not updated will be retained.
"""
return datetime.timedelta(
milliseconds=self._j_table_config.getIdleStateRetention().toMillis())
def get_configuration(self) -> Configuration:
"""
Gives direct access to the underlying key-value map for advanced configuration.
:return: Entire key-value configuration.
"""
return Configuration(j_configuration=self._j_table_config.getConfiguration())
def add_configuration(self, configuration: Configuration):
"""
Adds the given key-value configuration to the underlying configuration. It overwrites
existing keys.
:param configuration: Key-value configuration to be added.
"""
self._j_table_config.addConfiguration(configuration._j_configuration)
def get_sql_dialect(self) -> SqlDialect:
"""
Returns the current SQL dialect.
"""
return SqlDialect._from_j_sql_dialect(self._j_table_config.getSqlDialect())
def set_sql_dialect(self, sql_dialect: SqlDialect):
"""
Sets the current SQL dialect to parse a SQL query. Flink's SQL behavior by default.
:param sql_dialect: The given SQL dialect.
"""
self._j_table_config.setSqlDialect(SqlDialect._to_j_sql_dialect(sql_dialect))
def set_python_executable(self, python_exec: str):
"""
Sets the path of the python interpreter which is used to execute the python udf workers.
e.g. "/usr/local/bin/python3".
If python UDF depends on a specific python version which does not exist in the cluster,
the method :func:`pyflink.table.TableEnvironment.add_python_archive` can be used to upload
a virtual environment. The path of the python interpreter contained in the uploaded
environment can be specified via this method.
Example:
::
# command executed in shell
# assume that the relative path of python interpreter is py_env/bin/python
$ zip -r py_env.zip py_env
# python code
>>> table_env.add_python_archive("py_env.zip")
>>> table_env.get_config().set_python_executable("py_env.zip/py_env/bin/python")
.. note::
Please make sure the uploaded python environment matches the platform that the cluster
is running on and that the python version must be 3.7 or higher.
.. note::
The python udf worker depends on Apache Beam (version == 2.43.0).
Please ensure that the specified environment meets the above requirements.
:param python_exec: The path of python interpreter.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
self.set(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), python_exec)
def get_python_executable(self) -> str:
"""
Gets the path of the python interpreter which is used to execute the python udf workers.
If no path is specified before, it will return a None value.
:return: The path of the python interpreter which is used to execute the python udf workers.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
return self.get_configuration().get_string(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), None)
@staticmethod
def get_default() -> 'TableConfig':
"""
:return: A TableConfig object with default settings.
"""
return TableConfig(get_gateway().jvm.TableConfig.getDefault())
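# Illustrative sketch (not part of the original module): common session tweaks on a
# TableConfig obtained from a TableEnvironment. The keys and values are examples only.
def _table_config_sketch(table_env):
    config = table_env.get_config()
    config.set_local_timezone("Europe/Berlin")
    config.set("parallelism.default", "4")
    config.set_idle_state_retention(datetime.timedelta(hours=1))
    return config.get("parallelism.default", "1")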
| 15,495 | 42.52809 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/module.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
__all__ = ['HiveModule', 'Module', 'ModuleEntry']
class Module(object):
"""
Modules define a set of metadata, including functions, user defined types, operators, rules,
etc.
    Metadata from modules are regarded as built-in or system metadata that users can take
    advantage of.
.. versionadded:: 1.12.0
"""
def __init__(self, j_module):
self._j_module = j_module
class HiveModule(Module):
"""
Module to provide Hive built-in metadata.
.. versionadded:: 1.12.0
"""
def __init__(self, hive_version: str = None):
gateway = get_gateway()
if hive_version is None:
j_hive_module = gateway.jvm.org.apache.flink.table.module.hive.HiveModule()
else:
j_hive_module = gateway.jvm.org.apache.flink.table.module.hive.HiveModule(hive_version)
super(HiveModule, self).__init__(j_hive_module)
class ModuleEntry(object):
"""
A POJO to represent a module's name and use status.
"""
def __init__(self, name: str, used: bool, j_module_entry=None):
if j_module_entry is None:
gateway = get_gateway()
self._j_module_entry = gateway.jvm.org.apache.flink.table.module.ModuleEntry(name, used)
else:
self._j_module_entry = j_module_entry
def name(self) -> str:
return self._j_module_entry.name()
def used(self) -> bool:
return self._j_module_entry.used()
def __repr__(self):
return self._j_module_entry.toString()
def __eq__(self, other):
return isinstance(other, self.__class__) and self._j_module_entry == other._j_module_entry
def __hash__(self):
return self._j_module_entry.hashCode()
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
return not self.__eq__(other)
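# Illustrative sketch (not part of the original module): loading Hive built-in
# functions into a TableEnvironment, assuming TableEnvironment.load_module and
# list_modules as provided by recent Flink releases. The name "hive" is an example.
def _hive_module_sketch(table_env):
    table_env.load_module("hive", HiveModule())
    # the core module plus the newly loaded "hive" module should now be reported
    return table_env.list_modules()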
| 2,861 | 32.27907 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/descriptors.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
from abc import ABCMeta
from collections import OrderedDict
from py4j.java_gateway import get_method
from typing import Dict, Union
from pyflink.java_gateway import get_gateway
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import DataType, _to_java_data_type
__all__ = [
'Rowtime',
'Schema'
]
class Descriptor(object, metaclass=ABCMeta):
"""
Base class of the descriptors that adds a set of string-based, normalized properties for
describing DDL information.
Typical characteristics of a descriptor are:
- descriptors have a default constructor
- descriptors themselves contain very little logic
- corresponding validators validate the correctness (goal: have a single point of validation)
A descriptor is similar to a builder in a builder pattern, thus, mutable for building
properties.
"""
def __init__(self, j_descriptor):
self._j_descriptor = j_descriptor
def to_properties(self) -> Dict:
"""
Converts this descriptor into a dict of properties.
:return: Dict object contains all of current properties.
"""
return dict(self._j_descriptor.toProperties())
class Rowtime(Descriptor):
"""
Rowtime descriptor for describing an event time attribute in the schema.
"""
def __init__(self):
gateway = get_gateway()
self._j_rowtime = gateway.jvm.Rowtime()
super(Rowtime, self).__init__(self._j_rowtime)
def timestamps_from_field(self, field_name: str):
"""
Sets a built-in timestamp extractor that converts an existing LONG or TIMESTAMP field into
the rowtime attribute.
:param field_name: The field to convert into a rowtime attribute.
:return: This rowtime descriptor.
"""
self._j_rowtime = self._j_rowtime.timestampsFromField(field_name)
return self
def timestamps_from_source(self) -> 'Rowtime':
"""
Sets a built-in timestamp extractor that converts the assigned timestamps from a DataStream
API record into the rowtime attribute and thus preserves the assigned timestamps from the
source.
.. note::
This extractor only works in streaming environments.
:return: This rowtime descriptor.
"""
self._j_rowtime = self._j_rowtime.timestampsFromSource()
return self
def timestamps_from_extractor(self, extractor: str) -> 'Rowtime':
"""
Sets a custom timestamp extractor to be used for the rowtime attribute.
:param extractor: The java fully-qualified class name of the TimestampExtractor to extract
the rowtime attribute from the physical type. The TimestampExtractor must
                          have a public no-argument constructor and must be loadable by the
                          current Java classloader.
:return: This rowtime descriptor.
"""
gateway = get_gateway()
self._j_rowtime = self._j_rowtime.timestampsFromExtractor(
gateway.jvm.Thread.currentThread().getContextClassLoader().loadClass(extractor)
.newInstance())
return self
def watermarks_periodic_ascending(self) -> 'Rowtime':
"""
Sets a built-in watermark strategy for ascending rowtime attributes.
Emits a watermark of the maximum observed timestamp so far minus 1. Rows that have a
timestamp equal to the max timestamp are not late.
:return: This rowtime descriptor.
"""
self._j_rowtime = self._j_rowtime.watermarksPeriodicAscending()
return self
def watermarks_periodic_bounded(self, delay: int) -> 'Rowtime':
"""
Sets a built-in watermark strategy for rowtime attributes which are out-of-order by a
bounded time interval.
Emits watermarks which are the maximum observed timestamp minus the specified delay.
:param delay: Delay in milliseconds.
:return: This rowtime descriptor.
"""
self._j_rowtime = self._j_rowtime.watermarksPeriodicBounded(delay)
return self
def watermarks_from_source(self) -> 'Rowtime':
"""
Sets a built-in watermark strategy which indicates the watermarks should be preserved from
the underlying DataStream API and thus preserves the assigned watermarks from the source.
:return: This rowtime descriptor.
"""
self._j_rowtime = self._j_rowtime.watermarksFromSource()
return self
def watermarks_from_strategy(self, strategy: str) -> 'Rowtime':
"""
Sets a custom watermark strategy to be used for the rowtime attribute.
:param strategy: The java fully-qualified class name of the WatermarkStrategy. The
                         WatermarkStrategy must have a public no-argument constructor and must
                         be loadable by the current Java classloader.
:return: This rowtime descriptor.
"""
gateway = get_gateway()
self._j_rowtime = self._j_rowtime.watermarksFromStrategy(
gateway.jvm.Thread.currentThread().getContextClassLoader().loadClass(strategy)
.newInstance())
return self
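# Hedged usage sketch: builds a Rowtime descriptor that reads the event time from an
# existing field and emits watermarks lagging the maximum observed timestamp by 60s.
# The field name "ts" and the 60000 ms delay are illustrative assumptions; a running
# JVM gateway is required, so the descriptor is only built when the function is called.
def _rowtime_usage_sketch() -> 'Rowtime':
    return (Rowtime()
            .timestamps_from_field("ts")
            .watermarks_periodic_bounded(60000))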
class Schema(Descriptor):
"""
Describes a schema of a table.
.. note::
Field names are matched by the exact name by default (case sensitive).
"""
def __init__(self, schema=None, fields=None, rowtime=None):
"""
Constructor of Schema descriptor.
:param schema: The :class:`TableSchema` object.
:param fields: Dict of fields with the field name and the data type or type string stored.
:param rowtime: A :class:`Rowtime` that specifies the previously defined field as an
event-time attribute.
"""
gateway = get_gateway()
self._j_schema = gateway.jvm.org.apache.flink.table.descriptors.Schema()
super(Schema, self).__init__(self._j_schema)
if schema is not None:
self.schema(schema)
if fields is not None:
self.fields(fields)
if rowtime is not None:
self.rowtime(rowtime)
def schema(self, table_schema: 'TableSchema') -> 'Schema':
"""
Sets the schema with field names and the types. Required.
This method overwrites existing fields added with
:func:`~pyflink.table.descriptors.Schema.field`.
:param table_schema: The :class:`TableSchema` object.
:return: This schema object.
"""
self._j_schema = self._j_schema.schema(table_schema._j_table_schema)
return self
def field(self, field_name: str, field_type: Union[DataType, str]) -> 'Schema':
"""
Adds a field with the field name and the data type or type string. Required.
This method can be called multiple times. The call order of this method also defines
the order of the fields in a row. Here is a document that introduces the type strings:
https://nightlies.apache.org/flink/flink-docs-stable/dev/table/connect.html#type-strings
:param field_name: The field name.
:param field_type: The data type or type string of the field.
:return: This schema object.
"""
if isinstance(field_type, str):
self._j_schema = self._j_schema.field(field_name, field_type)
else:
self._j_schema = self._j_schema.field(field_name, _to_java_data_type(field_type))
return self
def fields(self, fields: Dict[str, Union[DataType, str]]) -> 'Schema':
"""
Adds a set of fields with the field name and the data type or type string stored in a
list.
:param fields: Dict of fields with the field name and the data type or type string
stored.
E.g. OrderedDict([('int_field', DataTypes.INT()), ('string_field', DataTypes.STRING())]).
:return: This schema object.
.. versionadded:: 1.11.0
"""
if sys.version_info[:2] <= (3, 5) and not isinstance(fields, OrderedDict):
raise TypeError("Must use OrderedDict type in python3.5 or older version to key the "
"schema in insert order.")
elif sys.version_info[:2] > (3, 5) and not isinstance(fields, (OrderedDict, dict)):
raise TypeError("fields must be stored in a dict or OrderedDict")
for field_name, field_type in fields.items():
self.field(field_name=field_name, field_type=field_type)
return self
def from_origin_field(self, origin_field_name: str) -> 'Schema':
"""
Specifies the origin of the previously defined field. The origin field is defined by a
connector or format.
E.g. field("myString", Types.STRING).from_origin_field("CSV_MY_STRING")
.. note::
Field names are matched by the exact name by default (case sensitive).
:param origin_field_name: The origin field name.
:return: This schema object.
"""
self._j_schema = get_method(self._j_schema, "from")(origin_field_name)
return self
def proctime(self) -> 'Schema':
"""
Specifies the previously defined field as a processing-time attribute.
E.g. field("proctime", Types.SQL_TIMESTAMP_LTZ).proctime()
:return: This schema object.
"""
self._j_schema = self._j_schema.proctime()
return self
def rowtime(self, rowtime: Rowtime) -> 'Schema':
"""
Specifies the previously defined field as an event-time attribute.
E.g. field("rowtime", Types.SQL_TIMESTAMP).rowtime(...)
:param rowtime: A :class:`Rowtime`.
:return: This schema object.
"""
self._j_schema = self._j_schema.rowtime(rowtime._j_rowtime)
return self
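# Hedged usage sketch: declares a schema with a physical field and an event-time
# attribute backed by the Rowtime descriptor above. Field names, types and the 5000 ms
# watermark delay are illustrative assumptions; a running JVM gateway is required.
def _schema_usage_sketch() -> 'Schema':
    from pyflink.table.types import DataTypes
    return (Schema()
            .field("id", DataTypes.BIGINT())
            .field("rowtime", DataTypes.TIMESTAMP(3))
            .rowtime(Rowtime()
                     .timestamps_from_field("rowtime")
                     .watermarks_periodic_bounded(5000)))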
| 10,888 | 37.073427 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/table/result_kind.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
__all__ = ['ResultKind']
class ResultKind(object):
"""
ResultKind defines the types of the result.
:data:`SUCCESS`:
The statement (e.g. DDL, USE) executes successfully, and the result only contains a simple "OK".
:data:`SUCCESS_WITH_CONTENT`:
The statement (e.g. DML, DQL, SHOW) executes successfully, and the result contains important
content.
.. versionadded:: 1.11.0
"""
SUCCESS = 0
SUCCESS_WITH_CONTENT = 1
@staticmethod
def _from_j_result_kind(j_result_kind):
gateway = get_gateway()
JResultKind = gateway.jvm.org.apache.flink.table.api.ResultKind
if j_result_kind == JResultKind.SUCCESS:
return ResultKind.SUCCESS
elif j_result_kind == JResultKind.SUCCESS_WITH_CONTENT:
return ResultKind.SUCCESS_WITH_CONTENT
else:
raise Exception("Unsupported Java result kind: %s" % j_result_kind)
| 1,930 | 36.134615 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/types.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import calendar
import ctypes
import datetime
import decimal
import sys
import time
from array import array
from copy import copy
from enum import Enum
from functools import reduce
from threading import RLock
from py4j.java_gateway import get_java_class
from typing import List, Union
from pyflink.common.types import _create_row
from pyflink.util.java_utils import to_jarray, is_instance_of
from pyflink.java_gateway import get_gateway
from pyflink.common import Row, RowKind
__all__ = ['DataTypes', 'UserDefinedType', 'Row', 'RowKind']
class DataType(object):
"""
Describes the data type of a value in the table ecosystem. Instances of this class can be used
to declare input and/or output types of operations.
:class:`DataType` has two responsibilities: declaring a logical type and giving hints
about the physical representation of data to the optimizer. While the logical type is mandatory,
hints are optional but useful at the edges to other APIs.
The logical type is independent of any physical representation and is close to the "data type"
terminology of the SQL standard.
Physical hints are required at the edges of the table ecosystem. Hints indicate the data format
that an implementation expects.
:param nullable: boolean, whether the type can be null (None) or not.
"""
def __init__(self, nullable=True):
self._nullable = nullable
self._conversion_cls = ''
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, str(self._nullable).lower())
def __str__(self, *args, **kwargs):
return self.__class__.type_name()
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def not_null(self):
cp = copy(self)
cp._nullable = False
return cp
def nullable(self):
cp = copy(self)
cp._nullable = True
return cp
@classmethod
def type_name(cls) -> str:
return cls.__name__[:-4].upper()
def bridged_to(self, conversion_cls) -> 'DataType':
"""
Adds a hint that data should be represented using the given class when entering or leaving
the table ecosystem.
:param conversion_cls: the string representation of the conversion class
"""
self._conversion_cls = conversion_cls
return self
def need_conversion(self) -> bool:
"""
Whether this type needs conversion between Python objects and internal SQL objects.
This is used to avoid unnecessary conversion for ArrayType/MultisetType/MapType/RowType.
"""
return False
def to_sql_type(self, obj):
"""
Converts a Python object into an internal SQL object.
"""
return obj
def from_sql_type(self, obj):
"""
Converts an internal SQL object into a native Python object.
"""
return obj
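# Hedged sketch of the nullability and conversion-class hints declared above. BigIntType is
# defined later in this module, so the name only resolves when the function is called; the
# conversion class string 'java.lang.Long' is an illustrative assumption.
def _data_type_hints_sketch():
    t = BigIntType()                             # nullable by default
    t_not_null = t.not_null()                    # copy with nullability removed
    t_bridged = t.bridged_to('java.lang.Long')   # physical representation hint
    return t_not_null, t_bridged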
class AtomicType(DataType):
"""
An internal type used to represent everything that is not
arrays, rows, and maps.
"""
def __init__(self, nullable=True):
super(AtomicType, self).__init__(nullable)
class NullType(AtomicType):
"""
Null type.
The data type representing None.
"""
def __init__(self):
super(NullType, self).__init__(True)
class NumericType(AtomicType):
"""
Numeric data types.
"""
def __init__(self, nullable=True):
super(NumericType, self).__init__(nullable)
class IntegralType(NumericType):
"""
Integral data types.
"""
def __init__(self, nullable=True):
super(IntegralType, self).__init__(nullable)
class FractionalType(NumericType):
"""
Fractional data types.
"""
def __init__(self, nullable=True):
super(FractionalType, self).__init__(nullable)
class CharType(AtomicType):
"""
Char data type. SQL CHAR(n)
The serialized string representation is ``char(n)`` where ``n`` (default: 1) is the number of
code points. ``n`` must have a value between 1 and 2147483647(0x7fffffff) (both inclusive).
:param length: int, the string representation length.
:param nullable: boolean, whether the type can be null (None) or not.
"""
def __init__(self, length=1, nullable=True):
super(CharType, self).__init__(nullable)
self.length = length
def __repr__(self):
return 'CharType(%d, %s)' % (self.length, str(self._nullable).lower())
class VarCharType(AtomicType):
"""
Varchar data type. SQL VARCHAR(n)
The serialized string representation is ``varchar(n)`` where 'n' (default: 1) is the maximum
number of code points. 'n' must have a value between 1 and 2147483647(0x7fffffff)
(both inclusive).
:param length: int, the maximum string representation length.
:param nullable: boolean, whether the type can be null (None) or not.
"""
def __init__(self, length=1, nullable=True):
super(VarCharType, self).__init__(nullable)
self.length = length
def __repr__(self):
return "VarCharType(%d, %s)" % (self.length, str(self._nullable).lower())
class BinaryType(AtomicType):
"""
Binary (byte array) data type. SQL BINARY(n)
The serialized string representation is ``binary(n)`` where ``n`` (default: 1) is the number of
bytes. ``n`` must have a value between 1 and 2147483647(0x7fffffff) (both inclusive).
:param length: int, the number of bytes.
:param nullable: boolean, whether the type can be null (None) or not.
"""
def __init__(self, length=1, nullable=True):
super(BinaryType, self).__init__(nullable)
self.length = length
def __repr__(self):
return "BinaryType(%d, %s)" % (self.length, str(self._nullable).lower())
class VarBinaryType(AtomicType):
"""
Binary (byte array) data type. SQL VARBINARY(n)
The serialized string representation is ``varbinary(n)`` where ``n`` (default: 1) is the
maximum number of bytes. ``n`` must have a value between 1 and 0x7fffffff (both inclusive).
:param length: int, the maximum number of bytes.
:param nullable: boolean, whether the type can be null (None) or not.
"""
def __init__(self, length=1, nullable=True):
super(VarBinaryType, self).__init__(nullable)
self.length = length
def __repr__(self):
return "VarBinaryType(%d, %s)" % (self.length, str(self._nullable).lower())
class BooleanType(AtomicType):
"""
Boolean data types. SQL BOOLEAN
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, nullable=True):
super(BooleanType, self).__init__(nullable)
class TinyIntType(IntegralType):
"""
Byte data type. SQL TINYINT (8bits)
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, nullable=True):
super(TinyIntType, self).__init__(nullable)
class SmallIntType(IntegralType):
"""
Short data type. SQL SMALLINT (16bits)
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, nullable=True):
super(SmallIntType, self).__init__(nullable)
class IntType(IntegralType):
"""
Int data types. SQL INT (32bits)
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, nullable=True):
super(IntType, self).__init__(nullable)
class BigIntType(IntegralType):
"""
Long data types. SQL BIGINT (64bits)
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, nullable=True):
super(BigIntType, self).__init__(nullable)
class FloatType(FractionalType):
"""
Float data type. SQL FLOAT
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, nullable=True):
super(FloatType, self).__init__(nullable)
class DoubleType(FractionalType):
"""
Double data type. SQL DOUBLE
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, nullable=True):
super(DoubleType, self).__init__(nullable)
class DecimalType(FractionalType):
"""
Decimal (decimal.Decimal) data type.
The DecimalType must have a fixed precision (the maximum total number of digits)
and scale (the number of digits to the right of the decimal point). For example, (5, 2)
can support values in the range [-999.99, 999.99].
The precision can be up to 38; the scale must be less than or equal to the precision.
When creating a DecimalType, the default precision and scale are (10, 0). When inferring
the schema from decimal.Decimal objects, it will be DecimalType(38, 18).
:param precision: the number of digits in a number (default: 10)
:param scale: the number of digits to the right of the decimal point (default: 0)
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, precision=10, scale=0, nullable=True):
super(DecimalType, self).__init__(nullable)
assert 1 <= precision <= 38
assert 0 <= scale <= precision
self.precision = precision
self.scale = scale
self.has_precision_info = True # this is public API
def __repr__(self):
return "DecimalType(%d, %d, %s)" % (self.precision, self.scale, str(self._nullable).lower())
class DateType(AtomicType):
"""
Date data type. SQL DATE
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, nullable=True):
super(DateType, self).__init__(nullable)
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def need_conversion(self):
return True
def to_sql_type(self, d):
if d is not None:
return d.toordinal() - self.EPOCH_ORDINAL
def from_sql_type(self, v):
if v is not None:
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
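# Hedged round-trip sketch for the DateType conversion above: dates are encoded as the
# number of days since the Unix epoch and decoded back into datetime.date.
def _date_type_round_trip_sketch():
    dt = DateType()
    days = dt.to_sql_type(datetime.date(1970, 1, 2))   # -> 1
    return dt.from_sql_type(days)                      # -> datetime.date(1970, 1, 2)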
class TimeType(AtomicType):
"""
Time data type. SQL TIME
The precision must be greater than or equal to 0 and less than or equal to 9.
:param precision: int, the number of digits of fractional seconds (default: 0)
:param nullable: boolean, whether the field can be null (None) or not.
"""
EPOCH_ORDINAL = calendar.timegm(time.localtime(0)) * 10 ** 6
def __init__(self, precision=0, nullable=True):
super(TimeType, self).__init__(nullable)
assert 0 <= precision <= 9
self.precision = precision
def __repr__(self):
return "TimeType(%s, %s)" % (self.precision, str(self._nullable).lower())
def need_conversion(self):
return True
def to_sql_type(self, t):
if t is not None:
if t.tzinfo is not None:
offset = t.utcoffset()
offset = offset if offset else datetime.timedelta()
offset_microseconds =\
(offset.days * 86400 + offset.seconds) * 10 ** 6 + offset.microseconds
else:
offset_microseconds = self.EPOCH_ORDINAL
minutes = t.hour * 60 + t.minute
seconds = minutes * 60 + t.second
return seconds * 10 ** 6 + t.microsecond - offset_microseconds
def from_sql_type(self, t):
if t is not None:
seconds, microseconds = divmod(t + self.EPOCH_ORDINAL, 10 ** 6)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, microseconds)
class TimestampType(AtomicType):
"""
Timestamp data type. SQL TIMESTAMP WITHOUT TIME ZONE.
Consisting of ``year-month-day hour:minute:second[.fractional]`` with up to nanosecond
precision and values ranging from ``0000-01-01 00:00:00.000000000`` to
``9999-12-31 23:59:59.999999999``. Compared to the SQL standard, leap seconds (23:59:60 and
23:59:61) are not supported.
This class does not store or represent a time-zone. Instead, it is a description of
the date, as used for birthdays, combined with the local time as seen on a wall clock.
It cannot represent an instant on the time-line without additional information
such as an offset or time-zone.
The precision must be greater than or equal to 0 and less than or equal to 9.
:param precision: int, the number of digits of fractional seconds (default: 6)
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, precision=6, nullable=True):
super(TimestampType, self).__init__(nullable)
assert 0 <= precision <= 9
self.precision = precision
def __repr__(self):
return "TimestampType(%s, %s)" % (self.precision, str(self._nullable).lower())
def need_conversion(self):
return True
def to_sql_type(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 10 ** 6 + dt.microsecond
def from_sql_type(self, ts):
if ts is not None:
return datetime.datetime.fromtimestamp(ts // 10 ** 6).replace(microsecond=ts % 10 ** 6)
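# Hedged round-trip sketch for TimestampType: naive datetimes are encoded as microseconds
# via time.mktime (local time), so the decoded value matches the input only for local
# timestamps that are unambiguous (an assumption of this sketch, not a general guarantee).
def _timestamp_type_round_trip_sketch():
    tt = TimestampType(precision=3)
    micros = tt.to_sql_type(datetime.datetime(2020, 1, 1, 12, 0, 0, 123000))
    return tt.from_sql_type(micros)   # -> datetime.datetime(2020, 1, 1, 12, 0, 0, 123000)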
class LocalZonedTimestampType(AtomicType):
"""
Timestamp data type. SQL TIMESTAMP WITH LOCAL TIME ZONE.
Consisting of ``year-month-day hour:minute:second[.fractional] zone`` with up to nanosecond
precision and values ranging from ``0000-01-01 00:00:00.000000000 +14:59`` to
``9999-12-31 23:59:59.999999999 -14:59``. Compared to the SQL standard, leap seconds (23:59:60
and 23:59:61) are not supported.
The value will be stored internally as a long value which stores all date and time
fields, to a precision of nanoseconds, as well as the offset from UTC/Greenwich.
The precision must be greater than or equal to 0 and less than or equal to 9.
:param precision: int, the number of digits of fractional seconds (default: 6)
:param nullable: boolean, whether the field can be null (None) or not.
"""
EPOCH_ORDINAL = calendar.timegm(time.localtime(0)) * 10 ** 6
def __init__(self, precision=6, nullable=True):
super(LocalZonedTimestampType, self).__init__(nullable)
assert 0 <= precision <= 9
self.precision = precision
def __repr__(self):
return "LocalZonedTimestampType(%s, %s)" % (self.precision, str(self._nullable).lower())
def need_conversion(self):
return True
def to_sql_type(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 10 ** 6 + dt.microsecond + self.EPOCH_ORDINAL
def from_sql_type(self, ts):
if ts is not None:
ts = ts - self.EPOCH_ORDINAL
return datetime.datetime.fromtimestamp(ts // 10 ** 6).replace(microsecond=ts % 10 ** 6)
class ZonedTimestampType(AtomicType):
"""
Timestamp data type with time zone. SQL TIMESTAMP WITH TIME ZONE.
Consisting of ``year-month-day hour:minute:second[.fractional] zone`` with up to nanosecond
precision and values ranging from ``0000-01-01 00:00:00.000000000 +14:59`` to
``9999-12-31 23:59:59.999999999 -14:59``. Compared to the SQL standard, leap seconds (23:59:60
and 23:59:61) are not supported.
The value internally stores all date and time fields, to a precision of nanoseconds,
as well as a time zone, with a zone offset used to handle ambiguous local date-times.
The precision must be greater than or equal to 0 and less than or equal to 9.
:param precision: int, the number of digits of fractional seconds (default: 6)
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, precision=6, nullable=True):
super(ZonedTimestampType, self).__init__(nullable)
assert 0 <= precision <= 9
self.precision = precision
def __repr__(self):
return "ZonedTimestampType(%s, %s)" % (self.precision, str(self._nullable).lower())
def need_conversion(self):
return True
def to_sql_type(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
tzinfo = dt.tzinfo if dt.tzinfo else datetime.datetime.now(
datetime.timezone.utc).astimezone().tzinfo
offset = int(tzinfo.utcoffset(dt).total_seconds())
return int(seconds + offset) * 10 ** 6 + dt.microsecond, offset
def from_sql_type(self, zoned_ts):
if zoned_ts is not None:
from dateutil import tz
ts = zoned_ts[0] - zoned_ts[1] * 10 ** 6
tzinfo = tz.tzoffset(None, zoned_ts[1])
return datetime.datetime.fromtimestamp(ts // 10 ** 6, tz=tzinfo).replace(
microsecond=ts % 10 ** 6)
class Resolution(object):
"""
Helper class for defining the resolution of an interval.
:param unit: value defined in the constants of :class:`IntervalUnit`.
:param precision: the number of digits of years (=year precision), the number of digits of
days (=day precision), or the number of digits of fractional seconds
(=fractional precision).
"""
class IntervalUnit(Enum):
SECOND = 0
MINUTE = 1
HOUR = 2
DAY = 3
MONTH = 4
YEAR = 5
def __init__(self, unit, precision=-1):
self._unit = unit
self._precision = precision
@property
def unit(self):
return self._unit
@property
def precision(self):
return self._precision
def __str__(self):
return '%s(%s)' % (str(self._unit), str(self._precision))
class YearMonthIntervalType(AtomicType):
"""
Year-month interval types. The type must be parameterized to one of the following
resolutions: interval of years, interval of years to months, or interval of months.
An interval of year-month consists of ``+years-months`` with values ranging from ``-9999-11``
to ``+9999-11``. The value representation is the same for all types of resolutions. For
example, an interval of months of 50 is always represented in an interval-of-years-to-months
format (with default year precision): ``+04-02``.
:param resolution: value defined in the constants of :class:`YearMonthResolution`,
representing one of the following resolutions: interval of years,
interval of years to months, or interval of months.
:param precision: int, the number of digits of years, must have a value
between 1 and 4 (both inclusive), default (2).
:param nullable: boolean, whether the field can be null (None) or not.
"""
class YearMonthResolution(object):
"""
Supported resolutions of :class:`YearMonthIntervalType`.
"""
YEAR = 1
MONTH = 2
YEAR_TO_MONTH = 3
DEFAULT_PRECISION = 2
def __init__(self, resolution, precision=DEFAULT_PRECISION, nullable=True):
assert resolution == YearMonthIntervalType.YearMonthResolution.YEAR or \
resolution == YearMonthIntervalType.YearMonthResolution.MONTH or \
resolution == YearMonthIntervalType.YearMonthResolution.YEAR_TO_MONTH
assert resolution != YearMonthIntervalType.YearMonthResolution.MONTH or \
precision == self.DEFAULT_PRECISION
assert 1 <= precision <= 4
self._resolution = resolution
self._precision = precision
super(YearMonthIntervalType, self).__init__(nullable)
@property
def resolution(self):
return self._resolution
@property
def precision(self):
return self._precision
class DayTimeIntervalType(AtomicType):
"""
Day-time interval types. The type must be parameterized to one of the following resolutions
with up to nanosecond precision: interval of days, interval of days to hours, interval of
days to minutes, interval of days to seconds, interval of hours, interval of hours to minutes,
interval of hours to seconds, interval of minutes, interval of minutes to seconds,
or interval of seconds.
An interval of day-time consists of ``+days hours:minutes:seconds.fractional`` with values
ranging from ``-999999 23:59:59.999999999`` to ``+999999 23:59:59.999999999``. The value
representation is the same for all types of resolutions. For example, an interval of seconds
of 70 is always represented in an interval-of-days-to-seconds format (with default precisions):
``+00 00:01:10.000000``.
:param resolution: value defined in the constants of :class:`DayTimeResolution`,
representing one of the following resolutions: interval of days, interval
of days to hours, interval of days to minutes, interval of days to seconds,
interval of hours, interval of hours to minutes, interval of hours to
seconds, interval of minutes, interval of minutes to seconds, or interval
of seconds.
:param day_precision: the number of digits of days, must have a value between 1 and 6 (both
inclusive) (default 2).
:param fractional_precision: the number of digits of fractional seconds, must have a value
between 0 and 9 (both inclusive) (default 6).
"""
class DayTimeResolution(Enum):
"""
Supported resolutions of :class:`DayTimeIntervalType`.
"""
DAY = 1
DAY_TO_HOUR = 2
DAY_TO_MINUTE = 3
DAY_TO_SECOND = 4
HOUR = 5
HOUR_TO_MINUTE = 6
HOUR_TO_SECOND = 7
MINUTE = 8
MINUTE_TO_SECOND = 9
SECOND = 10
DEFAULT_DAY_PRECISION = 2
DEFAULT_FRACTIONAL_PRECISION = 6
def __init__(self, resolution, day_precision=DEFAULT_DAY_PRECISION,
fractional_precision=DEFAULT_FRACTIONAL_PRECISION, nullable=True):
assert resolution == DayTimeIntervalType.DayTimeResolution.DAY or \
resolution == DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR or \
resolution == DayTimeIntervalType.DayTimeResolution.DAY_TO_MINUTE or \
resolution == DayTimeIntervalType.DayTimeResolution.DAY_TO_SECOND or \
resolution == DayTimeIntervalType.DayTimeResolution.HOUR or \
resolution == DayTimeIntervalType.DayTimeResolution.HOUR_TO_MINUTE or \
resolution == DayTimeIntervalType.DayTimeResolution.HOUR_TO_SECOND or \
resolution == DayTimeIntervalType.DayTimeResolution.MINUTE or \
resolution == DayTimeIntervalType.DayTimeResolution.MINUTE_TO_SECOND or \
resolution == DayTimeIntervalType.DayTimeResolution.SECOND
assert not self._needs_default_day_precision(
resolution) or day_precision == self.DEFAULT_DAY_PRECISION
assert not self._needs_default_fractional_precision(
resolution) or fractional_precision == self.DEFAULT_FRACTIONAL_PRECISION
assert 1 <= day_precision <= 6
assert 0 <= fractional_precision <= 9
self._resolution = resolution
self._day_precision = day_precision
self._fractional_precision = fractional_precision
super(DayTimeIntervalType, self).__init__(nullable)
def need_conversion(self):
return True
def to_sql_type(self, timedelta):
if timedelta is not None:
return (timedelta.days * 86400 + timedelta.seconds) * 10 ** 6 + timedelta.microseconds
def from_sql_type(self, ts):
if ts is not None:
return datetime.timedelta(microseconds=ts)
@property
def resolution(self) -> 'DayTimeIntervalType.DayTimeResolution':
return self._resolution
@property
def day_precision(self) -> int:
return self._day_precision
@property
def fractional_precision(self) -> int:
return self._fractional_precision
@staticmethod
def _needs_default_day_precision(resolution) -> bool:
if resolution == DayTimeIntervalType.DayTimeResolution.HOUR or \
resolution == DayTimeIntervalType.DayTimeResolution.HOUR_TO_MINUTE or \
resolution == DayTimeIntervalType.DayTimeResolution.HOUR_TO_SECOND or \
resolution == DayTimeIntervalType.DayTimeResolution.MINUTE or \
resolution == DayTimeIntervalType.DayTimeResolution.MINUTE_TO_SECOND or \
resolution == DayTimeIntervalType.DayTimeResolution.SECOND:
return True
else:
return False
@staticmethod
def _needs_default_fractional_precision(resolution) -> bool:
if resolution == DayTimeIntervalType.DayTimeResolution.DAY or \
resolution == DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR or \
resolution == DayTimeIntervalType.DayTimeResolution.DAY_TO_MINUTE or \
resolution == DayTimeIntervalType.DayTimeResolution.HOUR or \
resolution == DayTimeIntervalType.DayTimeResolution.HOUR_TO_MINUTE or \
resolution == DayTimeIntervalType.DayTimeResolution.MINUTE:
return True
else:
return False
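# Hedged round-trip sketch for the day-time interval conversion above: a timedelta is
# flattened into microseconds on the way in and restored on the way out.
def _day_time_interval_round_trip_sketch():
    interval_type = DayTimeIntervalType(DayTimeIntervalType.DayTimeResolution.DAY_TO_SECOND)
    micros = interval_type.to_sql_type(datetime.timedelta(days=1, seconds=30))
    # micros == (1 * 86400 + 30) * 10 ** 6
    return interval_type.from_sql_type(micros)   # -> datetime.timedelta(days=1, seconds=30)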
_resolution_mappings = {
(Resolution.IntervalUnit.YEAR, None):
lambda p1, p2: YearMonthIntervalType(
YearMonthIntervalType.YearMonthResolution.YEAR, p1),
(Resolution.IntervalUnit.MONTH, None):
lambda p1, p2: YearMonthIntervalType(
YearMonthIntervalType.YearMonthResolution.MONTH),
(Resolution.IntervalUnit.YEAR, Resolution.IntervalUnit.MONTH):
lambda p1, p2: YearMonthIntervalType(
YearMonthIntervalType.YearMonthResolution.YEAR_TO_MONTH),
(Resolution.IntervalUnit.DAY, None):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.DAY,
p1,
DayTimeIntervalType.DEFAULT_FRACTIONAL_PRECISION),
(Resolution.IntervalUnit.DAY, Resolution.IntervalUnit.HOUR):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR,
p1,
DayTimeIntervalType.DEFAULT_FRACTIONAL_PRECISION),
(Resolution.IntervalUnit.DAY, Resolution.IntervalUnit.MINUTE):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.DAY_TO_MINUTE,
p1,
DayTimeIntervalType.DEFAULT_FRACTIONAL_PRECISION),
(Resolution.IntervalUnit.DAY, Resolution.IntervalUnit.SECOND):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.DAY_TO_SECOND, p1, p2),
(Resolution.IntervalUnit.HOUR, None):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.HOUR),
(Resolution.IntervalUnit.HOUR, Resolution.IntervalUnit.MINUTE):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.HOUR_TO_MINUTE),
(Resolution.IntervalUnit.HOUR, Resolution.IntervalUnit.SECOND):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.HOUR_TO_SECOND,
DayTimeIntervalType.DEFAULT_DAY_PRECISION,
p2),
(Resolution.IntervalUnit.MINUTE, None):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.MINUTE),
(Resolution.IntervalUnit.MINUTE, Resolution.IntervalUnit.SECOND):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.MINUTE_TO_SECOND,
DayTimeIntervalType.DEFAULT_DAY_PRECISION,
p2),
(Resolution.IntervalUnit.SECOND, None):
lambda p1, p2: DayTimeIntervalType(
DayTimeIntervalType.DayTimeResolution.SECOND,
DayTimeIntervalType.DEFAULT_DAY_PRECISION,
p1)
}
def _from_resolution(upper_resolution: Resolution, lower_resolution: Resolution = None):
"""
Creates an interval type (YearMonthIntervalType or DayTimeIntervalType) from the
upper_resolution and lower_resolution.
"""
lower_unit = None if lower_resolution is None else lower_resolution.unit
lower_precision = -1 if lower_resolution is None else lower_resolution.precision
interval_type_provider = _resolution_mappings.get((upper_resolution.unit, lower_unit))
if interval_type_provider is None:
raise ValueError(
"Unsupported interval definition '%s TO %s'. Please check the documentation for "
"supported combinations for year-month and day-time intervals."
% (upper_resolution, lower_resolution))
return interval_type_provider(upper_resolution.precision, lower_precision)
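# Hedged sketch of how the mapping table above is used: an "INTERVAL DAY(2) TO SECOND(6)"
# style definition is expressed as an upper and a lower Resolution and turned into the
# corresponding DayTimeIntervalType.
def _from_resolution_sketch():
    upper = Resolution(Resolution.IntervalUnit.DAY, 2)
    lower = Resolution(Resolution.IntervalUnit.SECOND, 6)
    return _from_resolution(upper, lower)   # -> DayTimeIntervalType(DAY_TO_SECOND, 2, 6)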
def _from_java_interval_type(j_interval_type):
"""
Creates an interval type from the specified Java interval type.
:param j_interval_type: the Java interval type.
:return: :class:`YearMonthIntervalType` or :class:`DayTimeIntervalType`.
"""
gateway = get_gateway()
if is_instance_of(j_interval_type, gateway.jvm.YearMonthIntervalType):
resolution = j_interval_type.getResolution()
precision = j_interval_type.getYearPrecision()
def _from_java_year_month_resolution(j_resolution):
if j_resolution == gateway.jvm.YearMonthIntervalType.YearMonthResolution.YEAR:
return YearMonthIntervalType.YearMonthResolution.YEAR
elif j_resolution == gateway.jvm.YearMonthIntervalType.YearMonthResolution.MONTH:
return YearMonthIntervalType.YearMonthResolution.MONTH
else:
return YearMonthIntervalType.YearMonthResolution.YEAR_TO_MONTH
return YearMonthIntervalType(_from_java_year_month_resolution(resolution), precision)
else:
resolution = j_interval_type.getResolution()
day_precision = j_interval_type.getDayPrecision()
fractional_precision = j_interval_type.getFractionalPrecision()
def _from_java_day_time_resolution(j_resolution):
if j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.DAY:
return DayTimeIntervalType.DayTimeResolution.DAY
elif j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR:
return DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR
elif j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.DAY_TO_MINUTE:
return DayTimeIntervalType.DayTimeResolution.DAY_TO_MINUTE
elif j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.DAY_TO_SECOND:
return DayTimeIntervalType.DayTimeResolution.DAY_TO_SECOND
elif j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.HOUR:
return DayTimeIntervalType.DayTimeResolution.HOUR
elif j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.HOUR_TO_MINUTE:
return DayTimeIntervalType.DayTimeResolution.HOUR_TO_MINUTE
elif j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.HOUR_TO_SECOND:
return DayTimeIntervalType.DayTimeResolution.HOUR_TO_SECOND
elif j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.MINUTE:
return DayTimeIntervalType.DayTimeResolution.MINUTE
elif j_resolution == gateway.jvm.DayTimeIntervalType.DayTimeResolution.MINUTE_TO_SECOND:
return DayTimeIntervalType.DayTimeResolution.MINUTE_TO_SECOND
else:
return DayTimeIntervalType.DayTimeResolution.SECOND
return DayTimeIntervalType(
_from_java_day_time_resolution(resolution), day_precision, fractional_precision)
_boxed_to_primitive_array_map = \
{'java.lang.Integer': '[I',
'java.lang.Long': '[J',
'java.lang.Byte': '[B',
'java.lang.Short': '[S',
'java.lang.Character': '[C',
'java.lang.Boolean': '[Z',
'java.lang.Float': '[F',
'java.lang.Double': '[D'}
class ArrayType(DataType):
"""
Array data type.
:param element_type: :class:`DataType` of each element in the array.
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, element_type, nullable=True):
"""
>>> ArrayType(VarCharType(100)) == ArrayType(VarCharType(100))
True
>>> ArrayType(VarCharType(100)) == ArrayType(BigIntType())
False
"""
assert isinstance(element_type, DataType), \
"element_type %s should be an instance of %s" % (element_type, DataType)
super(ArrayType, self).__init__(nullable)
self.element_type = element_type
def __repr__(self):
return "ArrayType(%s, %s)" % (repr(self.element_type), str(self._nullable).lower())
def need_conversion(self):
return self.element_type.need_conversion()
def to_sql_type(self, obj):
if not self.need_conversion():
return obj
return obj and [self.element_type.to_sql_type(v) for v in obj]
def from_sql_type(self, obj):
if not self.need_conversion():
return obj
return obj and [self.element_type.from_sql_type(v) for v in obj]
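# Hedged sketch of the element-wise conversion above: because DateType needs conversion,
# ArrayType(DateType()) rewrites every element, while an array of e.g. IntType would be
# passed through unchanged.
def _array_type_conversion_sketch():
    arr = ArrayType(DateType())
    encoded = arr.to_sql_type([datetime.date(1970, 1, 2), None])   # -> [1, None]
    return arr.from_sql_type(encoded)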
class ListViewType(DataType):
def __init__(self, element_type):
assert isinstance(element_type, DataType), \
"element_type %s should be an instance of %s" % (element_type, DataType)
super(ListViewType, self).__init__(False)
self._element_type = element_type
def __repr__(self):
return "ListViewType(%s)" % repr(self._element_type)
def to_sql_type(self, obj):
raise Exception("ListViewType can only be used in accumulator type declaration of "
"AggregateFunction.")
def from_sql_type(self, obj):
raise Exception("ListViewType can only be used in accumulator type declaration of "
"AggregateFunction.")
class MapType(DataType):
"""
Map data type.
:param key_type: :class:`DataType` of the keys in the map.
:param value_type: :class:`DataType` of the values in the map.
:param nullable: boolean, whether the field can be null (None) or not.
Keys in a map data type are not allowed to be null (None).
"""
def __init__(self, key_type, value_type, nullable=True):
"""
>>> (MapType(VarCharType(100, nullable=False), IntType())
... == MapType(VarCharType(100, nullable=False), IntType()))
True
>>> (MapType(VarCharType(100, nullable=False), IntType())
... == MapType(VarCharType(100, nullable=False), FloatType()))
False
"""
assert isinstance(key_type, DataType), \
"key_type %s should be an instance of %s" % (key_type, DataType)
assert isinstance(value_type, DataType), \
"value_type %s should be an instance of %s" % (value_type, DataType)
super(MapType, self).__init__(nullable)
self.key_type = key_type
self.value_type = value_type
def __repr__(self):
return "MapType(%s, %s, %s)" % (
repr(self.key_type), repr(self.value_type), str(self._nullable).lower())
def need_conversion(self):
return self.key_type.need_conversion() or self.value_type.need_conversion()
def to_sql_type(self, obj):
if not self.need_conversion():
return obj
return obj and dict((self.key_type.to_sql_type(k), self.value_type.to_sql_type(v))
for k, v in obj.items())
def from_sql_type(self, obj):
if not self.need_conversion():
return obj
return obj and dict((self.key_type.from_sql_type(k), self.value_type.from_sql_type(v))
for k, v in obj.items())
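# Hedged sketch of the key/value conversion above: conversion is applied only when the key
# or value type needs it, here for the DateType values.
def _map_type_conversion_sketch():
    m = MapType(VarCharType(100, nullable=False), DateType())
    encoded = m.to_sql_type({"epoch_plus_one": datetime.date(1970, 1, 2)})   # -> {...: 1}
    return m.from_sql_type(encoded)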
class MapViewType(DataType):
def __init__(self, key_type, value_type):
assert isinstance(key_type, DataType), \
"element_type %s should be an instance of %s" % (key_type, DataType)
assert isinstance(value_type, DataType), \
"element_type %s should be an instance of %s" % (value_type, DataType)
super(MapViewType, self).__init__(False)
self._key_type = key_type
self._value_type = value_type
def __repr__(self):
return "MapViewType(%s, %s)" % (repr(self._key_type), repr(self._value_type))
def to_sql_type(self, obj):
raise Exception("MapViewType can only be used in accumulator type declaration of "
"AggregateFunction.")
def from_sql_type(self, obj):
raise Exception("MapViewType can only be used in accumulator type declaration of "
"AggregateFunction.")
class MultisetType(DataType):
"""
MultisetType data type.
:param element_type: :class:`DataType` of each element in the multiset.
:param nullable: boolean, whether the field can be null (None) or not.
"""
def __init__(self, element_type, nullable=True):
"""
>>> MultisetType(VarCharType(100)) == MultisetType(VarCharType(100))
True
>>> MultisetType(VarCharType(100)) == MultisetType(BigIntType())
False
"""
assert isinstance(element_type, DataType), \
"element_type %s should be an instance of %s" % (element_type, DataType)
super(MultisetType, self).__init__(nullable)
self.element_type = element_type
def __repr__(self):
return "MultisetType(%s, %s)" % (repr(self.element_type), str(self._nullable).lower())
def need_conversion(self):
return self.element_type.need_conversion()
def to_sql_type(self, obj):
if not self.need_conversion():
return obj
return obj and [self.element_type.to_sql_type(v) for v in obj]
def from_sql_type(self, obj):
if not self.need_conversion():
return obj
return obj and [self.element_type.from_sql_type(v) for v in obj]
class RowField(object):
"""
A field in :class:`RowType`.
:param name: string, name of the field.
:param data_type: :class:`DataType` of the field.
:param description: string, description of the field.
"""
def __init__(self, name, data_type, description=None):
"""
>>> (RowField("f1", VarCharType(100)) == RowField("f1", VarCharType(100)))
True
>>> (RowField("f1", VarCharType(100)) == RowField("f2", VarCharType(100)))
False
"""
assert isinstance(data_type, DataType), \
"data_type %s should be an instance of %s" % (data_type, DataType)
assert isinstance(name, str), "field name %s should be string" % name
if not isinstance(name, str):
name = name.encode('utf-8')
if description is not None:
assert isinstance(description, str), \
"description %s should be string" % description
if not isinstance(description, str):
description = description.encode('utf-8')
self.name = name
self.data_type = data_type
self.description = '...' if description is None else description
def __repr__(self):
return "RowField(%s, %s, %s)" % (self.name, repr(self.data_type), self.description)
def __str__(self, *args, **kwargs):
return "RowField(%s, %s)" % (self.name, self.data_type)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def need_conversion(self):
return self.data_type.need_conversion()
def to_sql_type(self, obj):
return self.data_type.to_sql_type(obj)
def from_sql_type(self, obj):
return self.data_type.from_sql_type(obj)
class RowType(DataType):
"""
Row type, consisting of a list of :class:`RowField`.
This is the data type representing a :class:`Row`.
Iterating a :class:`RowType` will iterate its :class:`RowField`\\s.
A contained :class:`RowField` can be accessed by name or position.
>>> row1 = RowType([RowField("f1", VarCharType(100))])
>>> row1["f1"]
RowField(f1, VarCharType(100))
>>> row1[0]
RowField(f1, VarCharType(100))
"""
def __init__(self, fields=None, nullable=True):
"""
>>> row1 = RowType([RowField("f1", VarCharType(100))])
>>> row2 = RowType([RowField("f1", VarCharType(100))])
>>> row1 == row2
True
>>> row1 = RowType([RowField("f1", VarCharType(100))])
>>> row2 = RowType([RowField("f1", VarCharType(100)), RowField("f2", IntType())])
>>> row1 == row2
False
"""
super(RowType, self).__init__(nullable)
if not fields:
self.fields = []
self.names = []
else:
self.fields = fields
self.names = [f.name for f in fields]
assert all(isinstance(f, RowField) for f in fields), \
"fields should be a list of RowField"
# Precalculated list of fields that need conversion with
# from_sql_type/to_sql_type functions
self._need_conversion = [f.need_conversion() for f in self]
self._need_serialize_any_field = any(self._need_conversion)
def add(self, field, data_type=None):
"""
Constructs a RowType by adding new elements to it to define the schema. The method accepts
either:
a) A single parameter which is a RowField object.
b) 2 parameters as (name, data_type). The data_type parameter may be either a String
or a DataType object.
>>> row1 = RowType().add("f1", VarCharType(100)).add("f2", VarCharType(100))
>>> row2 = RowType([RowField("f1", VarCharType(100)), RowField("f2", VarCharType(100))])
>>> row1 == row2
True
>>> row1 = RowType().add(RowField("f1", VarCharType(100)))
>>> row2 = RowType([RowField("f1", VarCharType(100))])
>>> row1 == row2
True
:param field: Either the name of the field or a RowField object
:param data_type: If present, the DataType of the RowField to create
:return: a new updated RowType
"""
if isinstance(field, RowField):
self.fields.append(field)
self.names.append(field.name)
else:
if isinstance(field, str) and data_type is None:
raise ValueError("Must specify DataType if passing name of row_field to create.")
self.fields.append(RowField(field, data_type))
self.names.append(field)
# Precalculated list of fields that need conversion with
# from_sql_type/to_sql_type functions
self._need_conversion = [f.need_conversion() for f in self]
self._need_serialize_any_field = any(self._need_conversion)
return self
def __iter__(self):
"""
Iterate the fields.
"""
return iter(self.fields)
def __len__(self):
"""
Returns the number of fields.
"""
return len(self.fields)
def __getitem__(self, key):
"""
Accesses fields by name or slice.
"""
if isinstance(key, str):
for field in self:
if field.name == key:
return field
raise KeyError('No RowField named {0}'.format(key))
elif isinstance(key, int):
try:
return self.fields[key]
except IndexError:
raise IndexError('RowType index out of range')
elif isinstance(key, slice):
return RowType(self.fields[key])
else:
raise TypeError('RowType keys should be strings, integers or slices')
def __repr__(self):
return "RowType(%s)" % ",".join(repr(field) for field in self)
def field_names(self):
"""
Returns all field names in a list.
>>> row = RowType([RowField("f1", VarCharType(100))])
>>> row.field_names()
['f1']
"""
return list(self.names)
def field_types(self):
"""
Returns all field types in a list.
.. versionadded:: 1.11.0
"""
return list([f.data_type for f in self.fields])
def need_conversion(self):
# We need to convert Row()/namedtuple into tuple()
return True
def to_sql_type(self, obj):
if obj is None:
return
if self._need_serialize_any_field:
# Only calling to_sql_type function for fields that need conversion
if isinstance(obj, dict):
return (RowKind.INSERT.value,) + tuple(
f.to_sql_type(obj.get(n)) if c else obj.get(n)
for n, f, c in zip(self.names, self.fields, self._need_conversion))
elif isinstance(obj, Row) and hasattr(obj, "_fields"):
return (obj.get_row_kind().value,) + tuple(
f.to_sql_type(obj[n]) if c else obj[n]
for n, f, c in zip(self.names, self.fields, self._need_conversion))
elif isinstance(obj, Row):
return (obj.get_row_kind().value, ) + tuple(
f.to_sql_type(v) if c else v
for f, v, c in zip(self.fields, obj, self._need_conversion))
elif isinstance(obj, (tuple, list, Row)):
return (RowKind.INSERT.value,) + tuple(
f.to_sql_type(v) if c else v
for f, v, c in zip(self.fields, obj, self._need_conversion))
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return (RowKind.INSERT.value,) + tuple(
f.to_sql_type(d.get(n)) if c else d.get(n)
for n, f, c in zip(self.names, self.fields, self._need_conversion))
else:
raise ValueError("Unexpected tuple %r with RowType" % obj)
else:
if isinstance(obj, dict):
return (RowKind.INSERT.value,) + tuple(obj.get(n) for n in self.names)
elif isinstance(obj, Row) and hasattr(obj, "_fields"):
return (obj.get_row_kind().value,) + tuple(obj[n] for n in self.names)
elif isinstance(obj, Row):
return (obj.get_row_kind().value,) + tuple(obj)
elif isinstance(obj, (list, tuple)):
return (RowKind.INSERT.value,) + tuple(obj)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return (RowKind.INSERT.value,) + tuple(d.get(n) for n in self.names)
else:
raise ValueError("Unexpected tuple %r with RowType" % obj)
def from_sql_type(self, obj):
if obj is None:
return
if isinstance(obj, Row):
# it's already converted by pickler
return obj
if self._need_serialize_any_field:
# Only calling from_sql_type function for fields that need conversion
values = [f.from_sql_type(v) if c else v
for f, v, c in zip(self.fields, obj, self._need_conversion)]
else:
values = obj
return _create_row(self.names, values)
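# Hedged sketch of the row conversion above: a Row is flattened into a tuple whose first
# element encodes the RowKind, followed by the (possibly converted) field values; as the
# code shows, from_sql_type expects only the field values, without the RowKind prefix.
def _row_type_conversion_sketch():
    row_type = RowType([RowField("name", VarCharType(100)), RowField("birthday", DateType())])
    encoded = row_type.to_sql_type(Row(name="flink", birthday=datetime.date(1970, 1, 2)))
    # encoded == (RowKind.INSERT.value, 'flink', 1)
    return row_type.from_sql_type(encoded[1:])   # -> Row(name='flink', birthday=date(1970, 1, 2))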
class RawType(DataType):
"""
Logical type of pickled byte array type.
"""
def from_sql_type(self, obj):
import pickle
return pickle.loads(obj)
class UserDefinedType(DataType):
"""
User-defined type (UDT).
.. note:: WARN: Flink Internal Use Only
"""
def __eq__(self, other):
return type(self) == type(other)
@classmethod
def type_name(cls):
return cls.__name__.lower()
@classmethod
def sql_type(cls):
"""
Underlying SQL storage type for this UDT.
"""
raise NotImplementedError("UDT must implement sql_type().")
@classmethod
def module(cls):
"""
The Python module of the UDT.
"""
raise NotImplementedError("UDT must implement module().")
@classmethod
def java_udt(cls):
"""
The class name of the paired Java UDT (could be '', if there
is no corresponding one).
"""
return ''
def need_conversion(self):
return True
@classmethod
def _cached_sql_type(cls):
"""
Caches the sql_type() into the class, because it's heavily used in `to_sql_type`.
"""
if not hasattr(cls, "__cached_sql_type"):
cls.__cached_sql_type = cls.sql_type()
return cls.__cached_sql_type
def to_sql_type(self, obj):
if obj is not None:
return self._cached_sql_type().to_sql_type(self.serialize(obj))
def from_sql_type(self, obj):
v = self._cached_sql_type().from_sql_type(obj)
if v is not None:
return self.deserialize(v)
def serialize(self, obj):
"""
Converts a user-type object into a SQL datum.
"""
raise NotImplementedError("UDT must implement serialize().")
def deserialize(self, datum):
"""
Converts a SQL datum into a user-type object.
"""
raise NotImplementedError("UDT must implement deserialize().")
# Mapping Python types to Flink SQL types
_type_mappings = {
bool: BooleanType(),
int: BigIntType(),
float: DoubleType(),
str: VarCharType(0x7fffffff),
bytearray: VarBinaryType(0x7fffffff),
decimal.Decimal: DecimalType(38, 18),
datetime.date: DateType(),
datetime.datetime: LocalZonedTimestampType(),
datetime.time: TimeType(),
}
# Mapping Python array types to Flink SQL types
# We should be careful here. The size of these types in Python depends on the C
# implementation. We need to make sure that this conversion does not lose any
# precision. Also, the JVM only supports signed types; when converting unsigned types,
# keep in mind that they require 1 more bit when stored as signed types.
#
# Reference for C integer size, see:
# ISO/IEC 9899:201x specification, chapter 5.2.4.2.1 Sizes of integer types <limits.h>.
# Reference for python array typecode, see:
# https://docs.python.org/2/library/array.html
# https://docs.python.org/3.6/library/array.html
# Reference for JVM's supported integral types:
# http://docs.oracle.com/javase/specs/jvms/se8/html/jvms-2.html#jvms-2.3.1
_array_signed_int_typecode_ctype_mappings = {
'b': ctypes.c_byte,
'h': ctypes.c_short,
'i': ctypes.c_int,
'l': ctypes.c_long,
}
_array_unsigned_int_typecode_ctype_mappings = {
'B': ctypes.c_ubyte,
'H': ctypes.c_ushort,
'I': ctypes.c_uint,
'L': ctypes.c_ulong
}
def _int_size_to_type(size):
"""
Returns the data type from the size of integers.
"""
if size <= 8:
return TinyIntType()
if size <= 16:
return SmallIntType()
if size <= 32:
return IntType()
if size <= 64:
return BigIntType()
# The list of all supported array typecodes is stored here
_array_type_mappings = {
# Warning: the actual sizes of float and double in C are not specified by the C standard.
# On almost every system supported by both Python and the JVM, they are IEEE 754
# single-precision and IEEE 754 double-precision binary floating-point formats,
# and we do assume the same thing here for now.
'f': FloatType(),
'd': DoubleType()
}
# compute array typecode mappings for signed integer types
for _typecode in _array_signed_int_typecode_ctype_mappings.keys():
size = ctypes.sizeof(_array_signed_int_typecode_ctype_mappings[_typecode]) * 8
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# compute array typecode mappings for unsigned integer types
for _typecode in _array_unsigned_int_typecode_ctype_mappings.keys():
# The JVM does not have unsigned types, so use a signed type that is at least 1
# bit larger to store the value
size = ctypes.sizeof(_array_unsigned_int_typecode_ctype_mappings[_typecode]) * 8 + 1
dt = _int_size_to_type(size)
if dt is not None:
_array_type_mappings[_typecode] = dt
# Type code 'u' in Python's array is deprecated since version 3.3, and will be
# removed in version 4.0. See: https://docs.python.org/3/library/array.html
if sys.version_info[0] < 4:
# it can be 16 bits or 32 bits depending on the platform
_array_type_mappings['u'] = CharType(ctypes.sizeof(ctypes.c_wchar)) # type: ignore
def _infer_type(obj):
"""
Infers the data type from obj.
"""
if obj is None:
return NullType()
if hasattr(obj, '__UDT__'):
return obj.__UDT__
data_type = _type_mappings.get(type(obj))
if data_type is not None:
return data_type
if isinstance(obj, dict):
for key, value in obj.items():
if key is not None and value is not None:
return MapType(_infer_type(key).not_null(), _infer_type(value))
else:
return MapType(NullType(), NullType())
elif isinstance(obj, list):
for v in obj:
if v is not None:
return ArrayType(_infer_type(obj[0]))
else:
return ArrayType(NullType())
elif isinstance(obj, array):
if obj.typecode in _array_type_mappings:
return ArrayType(_array_type_mappings[obj.typecode].not_null())
else:
raise TypeError("not supported type: array(%s)" % obj.typecode)
else:
try:
return _infer_schema(obj)
except TypeError:
raise TypeError("not supported type: %s" % type(obj))
def _infer_schema(row, names=None):
"""
Infers the schema from dict/row/namedtuple/object.
"""
if isinstance(row, dict): # dict
items = sorted(row.items())
elif isinstance(row, (Row, tuple, list)):
if hasattr(row, "_fields"): # namedtuple and Row
items = zip(row._fields, tuple(row))
else:
if names is None:
names = ['_%d' % i for i in range(1, len(row) + 1)]
elif len(names) < len(row):
names.extend('_%d' % i for i in range(len(names) + 1, len(row) + 1))
items = zip(names, row)
elif hasattr(row, "__dict__"): # object
items = sorted(row.__dict__.items())
else:
raise TypeError("Can not infer schema for type: %s" % type(row))
fields = [RowField(k, _infer_type(v)) for k, v in items]
return RowType(fields)
def _has_nulltype(dt):
"""
Returns whether there is NullType in `dt` or not.
"""
if isinstance(dt, RowType):
return any(_has_nulltype(f.data_type) for f in dt.fields)
elif isinstance(dt, ArrayType) or isinstance(dt, MultisetType):
return _has_nulltype(dt.element_type)
elif isinstance(dt, MapType):
return _has_nulltype(dt.key_type) or _has_nulltype(dt.value_type)
else:
return isinstance(dt, NullType)
def _merge_type(a, b, name=None):
if name is None:
def new_msg(msg):
return msg
def new_name(n):
return "field %s" % n
else:
def new_msg(msg):
return "%s: %s" % (name, msg)
def new_name(n):
return "field %s in %s" % (n, name)
if isinstance(a, NullType):
return b
elif isinstance(b, NullType):
return a
elif type(a) is not type(b):
# TODO: type cast (such as int -> long)
raise TypeError(new_msg("Can not merge type %s and %s" % (type(a), type(b))))
# same type
if isinstance(a, RowType):
nfs = dict((f.name, f.data_type) for f in b.fields)
fields = [RowField(f.name, _merge_type(f.data_type, nfs.get(f.name, None),
name=new_name(f.name)))
for f in a.fields]
names = set([f.name for f in fields])
for n in nfs:
if n not in names:
fields.append(RowField(n, nfs[n]))
return RowType(fields)
elif isinstance(a, ArrayType):
return ArrayType(_merge_type(a.element_type, b.element_type,
name='element in array %s' % name))
elif isinstance(a, MultisetType):
return MultisetType(_merge_type(a.element_type, b.element_type,
name='element in multiset %s' % name))
elif isinstance(a, MapType):
return MapType(_merge_type(a.key_type, b.key_type, name='key of map %s' % name),
_merge_type(a.value_type, b.value_type, name='value of map %s' % name))
else:
return a
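# Hedged sketch of schema merging: NullType gives way to the concrete type observed on the
# other side, while fields only present in one row type are appended.
def _merge_type_sketch():
    a = RowType([RowField("id", BigIntType()), RowField("name", NullType())])
    b = RowType([RowField("id", BigIntType()), RowField("name", VarCharType(0x7fffffff))])
    return _merge_type(a, b)   # -> RowType(id BIGINT, name VARCHAR)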
def _infer_schema_from_data(elements, names=None) -> RowType:
"""
Infers schema from list of Row or tuple.
:param elements: list of Row or tuple
:param names: list of column names
:return: :class:`RowType`
"""
if not elements:
raise ValueError("can not infer schema from empty data set")
schema = reduce(_merge_type, (_infer_schema(row, names) for row in elements))
if _has_nulltype(schema):
raise ValueError("Some column types cannot be determined after inferring")
return schema
def _need_converter(data_type):
if isinstance(data_type, RowType):
return True
elif isinstance(data_type, ArrayType) or isinstance(data_type, MultisetType):
return _need_converter(data_type.element_type)
elif isinstance(data_type, MapType):
return _need_converter(data_type.key_type) or _need_converter(data_type.value_type)
elif isinstance(data_type, NullType):
return True
else:
return False
def _create_converter(data_type):
"""
Creates a converter to drop the names of fields in obj.
"""
if not _need_converter(data_type):
return lambda x: x
if isinstance(data_type, ArrayType) or isinstance(data_type, MultisetType):
conv = _create_converter(data_type.element_type)
return lambda row: [conv(v) for v in row]
elif isinstance(data_type, MapType):
kconv = _create_converter(data_type.key_type)
vconv = _create_converter(data_type.value_type)
return lambda row: dict((kconv(k), vconv(v)) for k, v in row.items())
elif isinstance(data_type, NullType):
return lambda x: None
elif not isinstance(data_type, RowType):
return lambda x: x
# data_type must be RowType
names = [f.name for f in data_type.fields]
converters = [_create_converter(f.data_type) for f in data_type.fields]
convert_fields = any(_need_converter(f.data_type) for f in data_type.fields)
def convert_row(obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
if convert_fields:
return tuple(conv(v) for v, conv in zip(obj, converters))
else:
return tuple(obj)
if isinstance(obj, dict):
d = obj
elif hasattr(obj, "__dict__"): # object
d = obj.__dict__
else:
raise TypeError("Unexpected obj type: %s" % type(obj))
if convert_fields:
return tuple([conv(d.get(name)) for name, conv in zip(names, converters)])
else:
return tuple([d.get(name) for name in names])
return convert_row
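# Hedged sketch of the converter above: named inputs (dicts or objects) are reduced to
# plain tuples in schema order, dropping the field names before serialization.
def _create_converter_sketch():
    schema = RowType([RowField("id", BigIntType()), RowField("name", VarCharType(100))])
    convert = _create_converter(schema)
    return convert({"name": "flink", "id": 1})   # -> (1, 'flink')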
_python_java_types_mapping = None
_python_java_types_mapping_lock = RLock()
_primitive_array_element_types = {BooleanType, TinyIntType, SmallIntType, IntType, BigIntType,
FloatType, DoubleType}
def _from_java_data_type(j_data_type):
"""
Converts Java DataType to Python DataType.
"""
gateway = get_gateway()
# Atomic Type with parameters.
if is_instance_of(j_data_type, gateway.jvm.AtomicDataType):
logical_type = j_data_type.getLogicalType()
if is_instance_of(logical_type, gateway.jvm.CharType):
data_type = DataTypes.CHAR(logical_type.getLength(), logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.VarCharType):
data_type = DataTypes.VARCHAR(logical_type.getLength(), logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.BinaryType):
data_type = DataTypes.BINARY(logical_type.getLength(), logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.VarBinaryType):
data_type = DataTypes.VARBINARY(logical_type.getLength(), logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.DecimalType):
data_type = DataTypes.DECIMAL(logical_type.getPrecision(),
logical_type.getScale(),
logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.DateType):
data_type = DataTypes.DATE(logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.TimeType):
data_type = DataTypes.TIME(logical_type.getPrecision(), logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.TimestampType):
data_type = DataTypes.TIMESTAMP(precision=3, nullable=logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.BooleanType):
data_type = DataTypes.BOOLEAN(logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.TinyIntType):
data_type = DataTypes.TINYINT(logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.SmallIntType):
data_type = DataTypes.SMALLINT(logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.IntType):
data_type = DataTypes.INT(logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.BigIntType):
data_type = DataTypes.BIGINT(logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.FloatType):
data_type = DataTypes.FLOAT(logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.DoubleType):
data_type = DataTypes.DOUBLE(logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.ZonedTimestampType):
raise \
TypeError("Unsupported type: %s, ZonedTimestampType is not supported yet."
% j_data_type)
elif is_instance_of(logical_type, gateway.jvm.LocalZonedTimestampType):
data_type = DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(nullable=logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.DayTimeIntervalType) or \
is_instance_of(logical_type, gateway.jvm.YearMonthIntervalType):
data_type = _from_java_interval_type(logical_type)
elif is_instance_of(logical_type, gateway.jvm.LegacyTypeInformationType):
type_info = logical_type.getTypeInformation()
BasicArrayTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.\
BasicArrayTypeInfo
BasicTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo
if type_info == BasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO:
data_type = DataTypes.ARRAY(DataTypes.STRING())
elif type_info == BasicTypeInfo.BIG_DEC_TYPE_INFO:
data_type = DataTypes.DECIMAL(38, 18)
elif type_info.getClass() == \
get_java_class(gateway.jvm.org.apache.flink.table.runtime.typeutils
.BigDecimalTypeInfo):
data_type = DataTypes.DECIMAL(type_info.precision(), type_info.scale())
elif type_info.getClass() == \
get_java_class(gateway.jvm.org.apache.flink.table.dataview.ListViewTypeInfo):
data_type = DataTypes.LIST_VIEW(_from_java_data_type(type_info.getElementType()))
elif type_info.getClass() == \
get_java_class(gateway.jvm.org.apache.flink.table.dataview.MapViewTypeInfo):
data_type = DataTypes.MAP_VIEW(
_from_java_data_type(type_info.getKeyType()),
_from_java_data_type(type_info.getValueType()))
else:
raise TypeError("Unsupported type: %s, it is recognized as a legacy type."
% type_info)
elif is_instance_of(logical_type, gateway.jvm.RawType):
data_type = RawType()
else:
raise TypeError("Unsupported type: %s, it is not supported yet in current python type"
" system" % j_data_type)
return data_type
# Array Type, MultiSet Type.
elif is_instance_of(j_data_type, gateway.jvm.CollectionDataType):
logical_type = j_data_type.getLogicalType()
element_type = j_data_type.getElementDataType()
if is_instance_of(logical_type, gateway.jvm.ArrayType):
data_type = DataTypes.ARRAY(_from_java_data_type(element_type),
logical_type.isNullable())
elif is_instance_of(logical_type, gateway.jvm.MultisetType):
data_type = DataTypes.MULTISET(_from_java_data_type(element_type),
logical_type.isNullable())
else:
raise TypeError("Unsupported collection data type: %s" % j_data_type)
return data_type
# Map Type.
elif is_instance_of(j_data_type, gateway.jvm.KeyValueDataType):
logical_type = j_data_type.getLogicalType()
key_type = j_data_type.getKeyDataType()
value_type = j_data_type.getValueDataType()
if is_instance_of(logical_type, gateway.jvm.MapType):
data_type = DataTypes.MAP(
_from_java_data_type(key_type),
_from_java_data_type(value_type),
logical_type.isNullable())
else:
raise TypeError("Unsupported map data type: %s" % j_data_type)
return data_type
# Row Type.
elif is_instance_of(j_data_type, gateway.jvm.FieldsDataType):
logical_type = j_data_type.getLogicalType()
field_data_types = j_data_type.getChildren()
if is_instance_of(logical_type, gateway.jvm.RowType):
fields = [DataTypes.FIELD(name, _from_java_data_type(field_data_types[idx]))
for idx, name in enumerate(logical_type.getFieldNames())]
data_type = DataTypes.ROW(fields, logical_type.isNullable())
elif j_data_type.getConversionClass().isAssignableFrom(
gateway.jvm.org.apache.flink.table.api.dataview.ListView._java_lang_class):
array_type = _from_java_data_type(field_data_types[0])
data_type = DataTypes.LIST_VIEW(array_type.element_type)
elif j_data_type.getConversionClass().isAssignableFrom(
gateway.jvm.org.apache.flink.table.api.dataview.MapView._java_lang_class):
map_type = _from_java_data_type(field_data_types[0])
data_type = DataTypes.MAP_VIEW(map_type.key_type, map_type.value_type)
else:
raise TypeError("Unsupported row data type: %s" % j_data_type)
return data_type
# Unrecognized type.
else:
TypeError("Unsupported data type: %s" % j_data_type)
def _to_java_data_type(data_type: DataType):
"""
Converts the specified Python DataType to Java DataType.
"""
gateway = get_gateway()
JDataTypes = gateway.jvm.org.apache.flink.table.api.DataTypes
if isinstance(data_type, BooleanType):
j_data_type = JDataTypes.BOOLEAN()
elif isinstance(data_type, TinyIntType):
j_data_type = JDataTypes.TINYINT()
elif isinstance(data_type, SmallIntType):
j_data_type = JDataTypes.SMALLINT()
elif isinstance(data_type, IntType):
j_data_type = JDataTypes.INT()
elif isinstance(data_type, BigIntType):
j_data_type = JDataTypes.BIGINT()
elif isinstance(data_type, FloatType):
j_data_type = JDataTypes.FLOAT()
elif isinstance(data_type, DoubleType):
j_data_type = JDataTypes.DOUBLE()
elif isinstance(data_type, VarCharType):
j_data_type = JDataTypes.VARCHAR(data_type.length)
elif isinstance(data_type, CharType):
j_data_type = JDataTypes.CHAR(data_type.length)
elif isinstance(data_type, VarBinaryType):
j_data_type = JDataTypes.VARBINARY(data_type.length)
elif isinstance(data_type, BinaryType):
j_data_type = JDataTypes.BINARY(data_type.length)
elif isinstance(data_type, DecimalType):
j_data_type = JDataTypes.DECIMAL(data_type.precision, data_type.scale)
elif isinstance(data_type, DateType):
j_data_type = JDataTypes.DATE()
elif isinstance(data_type, TimeType):
j_data_type = JDataTypes.TIME(data_type.precision)
elif isinstance(data_type, TimestampType):
j_data_type = JDataTypes.TIMESTAMP(data_type.precision)
elif isinstance(data_type, LocalZonedTimestampType):
j_data_type = JDataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(data_type.precision)
elif isinstance(data_type, ZonedTimestampType):
j_data_type = JDataTypes.TIMESTAMP_WITH_TIME_ZONE(data_type.precision)
elif isinstance(data_type, ArrayType):
j_data_type = JDataTypes.ARRAY(_to_java_data_type(data_type.element_type))
elif isinstance(data_type, MapType):
j_data_type = JDataTypes.MAP(
_to_java_data_type(data_type.key_type),
_to_java_data_type(data_type.value_type))
elif isinstance(data_type, RowType):
fields = [JDataTypes.FIELD(f.name, _to_java_data_type(f.data_type))
for f in data_type.fields]
j_data_type = JDataTypes.ROW(to_jarray(JDataTypes.Field, fields))
elif isinstance(data_type, UserDefinedType):
if data_type.java_udt():
return gateway.jvm.org.apache.flink.util.InstantiationUtil.instantiate(
gateway.jvm.Class.forName(
data_type.java_udt(),
True,
gateway.jvm.Thread.currentThread().getContextClassLoader()))
else:
return _to_java_data_type(data_type.sql_type())
elif isinstance(data_type, MultisetType):
j_data_type = JDataTypes.MULTISET(_to_java_data_type(data_type.element_type))
elif isinstance(data_type, NullType):
j_data_type = JDataTypes.NULL()
elif isinstance(data_type, YearMonthIntervalType):
if data_type.resolution == YearMonthIntervalType.YearMonthResolution.YEAR:
j_data_type = JDataTypes.INTERVAL(JDataTypes.YEAR(data_type.precision))
elif data_type.resolution == YearMonthIntervalType.YearMonthResolution.MONTH:
j_data_type = JDataTypes.INTERVAL(JDataTypes.MONTH())
else:
j_data_type = JDataTypes.INTERVAL(JDataTypes.YEAR(data_type.precision),
JDataTypes.MONTH())
elif isinstance(data_type, DayTimeIntervalType):
if data_type.resolution == DayTimeIntervalType.DayTimeResolution.DAY:
j_data_type = JDataTypes.INTERVAL(JDataTypes.DAY(data_type.day_precision))
elif data_type.resolution == DayTimeIntervalType.DayTimeResolution.DAY_TO_HOUR:
j_data_type = JDataTypes.INTERVAL(JDataTypes.DAY(data_type.day_precision),
JDataTypes.HOUR())
elif data_type.resolution == DayTimeIntervalType.DayTimeResolution.DAY_TO_MINUTE:
j_data_type = JDataTypes.INTERVAL(JDataTypes.DAY(data_type.day_precision),
JDataTypes.MINUTE())
elif data_type.resolution == DayTimeIntervalType.DayTimeResolution.DAY_TO_SECOND:
j_data_type = JDataTypes.INTERVAL(JDataTypes.DAY(data_type.day_precision),
JDataTypes.SECOND(data_type.fractional_precision))
elif data_type.resolution == DayTimeIntervalType.DayTimeResolution.HOUR:
j_data_type = JDataTypes.INTERVAL(JDataTypes.HOUR())
elif data_type.resolution == DayTimeIntervalType.DayTimeResolution.HOUR_TO_MINUTE:
j_data_type = JDataTypes.INTERVAL(JDataTypes.HOUR(), JDataTypes.MINUTE())
elif data_type.resolution == DayTimeIntervalType.DayTimeResolution.HOUR_TO_SECOND:
j_data_type = JDataTypes.INTERVAL(JDataTypes.HOUR(),
JDataTypes.SECOND(data_type.fractional_precision))
elif data_type.resolution == DayTimeIntervalType.DayTimeResolution.MINUTE:
j_data_type = JDataTypes.INTERVAL(JDataTypes.MINUTE())
elif data_type.resolution == DayTimeIntervalType.DayTimeResolution.MINUTE_TO_SECOND:
j_data_type = JDataTypes.INTERVAL(JDataTypes.MINUTE(),
JDataTypes.SECOND(data_type.fractional_precision))
else:
j_data_type = JDataTypes.INTERVAL(JDataTypes.SECOND(data_type.fractional_precision))
elif isinstance(data_type, ListViewType):
return gateway.jvm.org.apache.flink.table.api.dataview.ListView.newListViewDataType(
_to_java_data_type(data_type._element_type))
elif isinstance(data_type, MapViewType):
return gateway.jvm.org.apache.flink.table.api.dataview.MapView.newMapViewDataType(
_to_java_data_type(data_type._key_type), _to_java_data_type(data_type._value_type))
else:
raise TypeError("Unsupported data type: %s" % data_type)
if data_type._nullable:
j_data_type = j_data_type.nullable()
else:
j_data_type = j_data_type.notNull()
if data_type._conversion_cls:
j_data_type = j_data_type.bridgedTo(
gateway.jvm.org.apache.flink.api.python.shaded.py4j.reflection.ReflectionUtil
.classForName(data_type._conversion_cls)
)
return j_data_type
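# Illustrative example (a sketch only, since it needs a running JVM gateway): nullability and
# nested structure are preserved, e.g.
#
#     _to_java_data_type(DataTypes.ROW([DataTypes.FIELD("id", DataTypes.BIGINT(False))]))
#
# produces the Java equivalent of ROW<id BIGINT NOT NULL>, and feeding the result back
# through _from_java_data_type yields the same Python DataType again.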
_acceptable_types = {
BooleanType: (bool,),
TinyIntType: (int,),
SmallIntType: (int,),
IntType: (int,),
BigIntType: (int,),
FloatType: (float,),
DoubleType: (float,),
DecimalType: (decimal.Decimal,),
CharType: (str,),
VarCharType: (str,),
BinaryType: (bytearray,),
VarBinaryType: (bytearray,),
DateType: (datetime.date, datetime.datetime),
TimeType: (datetime.time,),
TimestampType: (datetime.datetime,),
DayTimeIntervalType: (datetime.timedelta,),
LocalZonedTimestampType: (datetime.datetime,),
ZonedTimestampType: (datetime.datetime,),
ArrayType: (list, tuple, array),
MapType: (dict,),
RowType: (tuple, list, dict),
}
def _create_type_verifier(data_type: DataType, name: str = None):
"""
Creates a verifier that checks the type of obj against data_type and raises a TypeError if they
do not match.
This verifier also checks the value of obj against data_type and raises a ValueError if it's
    not within the allowed range, e.g. using 128 as TinyIntType will overflow. Note that Python
float is not checked, so it will become infinity when cast to Java float if it overflows.
>>> _create_type_verifier(RowType([]))(None)
>>> _create_type_verifier(VarCharType(100))("")
>>> _create_type_verifier(BigIntType())(0)
>>> _create_type_verifier(ArrayType(SmallIntType()))(list(range(3)))
>>> _create_type_verifier(ArrayType(VarCharType(10)))(set()) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
TypeError:...
>>> _create_type_verifier(MapType(VarCharType(100), IntType()))({})
>>> _create_type_verifier(RowType([]))(())
>>> _create_type_verifier(RowType([]))([])
>>> _create_type_verifier(RowType([]))([1]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> # Check if numeric values are within the allowed range.
>>> _create_type_verifier(TinyIntType())(12)
>>> _create_type_verifier(TinyIntType())(1234) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
    >>> _create_type_verifier(TinyIntType(False))(None) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
>>> _create_type_verifier(
    ...     ArrayType(SmallIntType(False)))([1, None]) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
    >>> _create_type_verifier(MapType(VarCharType(100, False), IntType()))({None: 1})
Traceback (most recent call last):
...
ValueError:...
>>> schema = RowType().add("a", IntType()).add("b", VarCharType(100), False)
>>> _create_type_verifier(schema)((1, None)) # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
if name is None:
new_msg = lambda msg: msg
new_name = lambda n: "field %s" % n
else:
new_msg = lambda msg: "%s: %s" % (name, msg)
new_name = lambda n: "field %s in %s" % (n, name)
def verify_nullability(obj):
if obj is None:
if data_type._nullable:
return True
else:
raise ValueError(new_msg("This field is not nullable, but got None"))
else:
return False
_type = type(data_type)
assert _type in _acceptable_types or isinstance(data_type, UserDefinedType),\
new_msg("unknown datatype: %s" % data_type)
def verify_acceptable_types(obj):
        # exact types are required; subclasses cannot be handled by from_sql_type in the JVM
if type(obj) not in _acceptable_types[_type]:
raise TypeError(new_msg("%s can not accept object %r in type %s"
% (data_type, obj, type(obj))))
if isinstance(data_type, CharType):
def verify_char(obj):
verify_acceptable_types(obj)
if len(obj) != data_type.length:
raise ValueError(new_msg(
"length of object (%s) of CharType is not: %d" % (obj, data_type.length)))
verify_value = verify_char
elif isinstance(data_type, VarCharType):
def verify_varchar(obj):
verify_acceptable_types(obj)
if len(obj) > data_type.length:
raise ValueError(new_msg(
"length of object (%s) of VarCharType exceeds: %d" % (obj, data_type.length)))
verify_value = verify_varchar
elif isinstance(data_type, BinaryType):
def verify_binary(obj):
verify_acceptable_types(obj)
if len(obj) != data_type.length:
raise ValueError(new_msg(
"length of object (%s) of BinaryType is not: %d" % (obj, data_type.length)))
verify_value = verify_binary
elif isinstance(data_type, VarBinaryType):
def verify_varbinary(obj):
verify_acceptable_types(obj)
if len(obj) > data_type.length:
raise ValueError(new_msg(
"length of object (%s) of VarBinaryType exceeds: %d"
% (obj, data_type.length)))
verify_value = verify_varbinary
elif isinstance(data_type, UserDefinedType):
sql_type = data_type.sql_type()
verifier = _create_type_verifier(sql_type, name=name)
def verify_udf(obj):
if not (hasattr(obj, '__UDT__') and obj.__UDT__ == data_type):
raise ValueError(new_msg("%r is not an instance of type %r" % (obj, data_type)))
data = data_type.to_sql_type(obj)
if isinstance(sql_type, RowType):
# remove the RowKind value in the first position.
data = data[1:]
verifier(data)
verify_value = verify_udf
elif isinstance(data_type, TinyIntType):
def verify_tiny_int(obj):
verify_acceptable_types(obj)
if obj < -128 or obj > 127:
raise ValueError(new_msg("object of TinyIntType out of range, got: %s" % obj))
verify_value = verify_tiny_int
elif isinstance(data_type, SmallIntType):
def verify_small_int(obj):
verify_acceptable_types(obj)
if obj < -32768 or obj > 32767:
raise ValueError(new_msg("object of SmallIntType out of range, got: %s" % obj))
verify_value = verify_small_int
elif isinstance(data_type, IntType):
def verify_integer(obj):
verify_acceptable_types(obj)
if obj < -2147483648 or obj > 2147483647:
raise ValueError(
new_msg("object of IntType out of range, got: %s" % obj))
verify_value = verify_integer
elif isinstance(data_type, ArrayType):
element_verifier = _create_type_verifier(
data_type.element_type, name="element in array %s" % name)
def verify_array(obj):
verify_acceptable_types(obj)
for i in obj:
element_verifier(i)
verify_value = verify_array
elif isinstance(data_type, MapType):
key_verifier = _create_type_verifier(data_type.key_type, name="key of map %s" % name)
value_verifier = _create_type_verifier(data_type.value_type, name="value of map %s" % name)
def verify_map(obj):
verify_acceptable_types(obj)
for k, v in obj.items():
key_verifier(k)
value_verifier(v)
verify_value = verify_map
elif isinstance(data_type, RowType):
verifiers = []
for f in data_type.fields:
verifier = _create_type_verifier(f.data_type, name=new_name(f.name))
verifiers.append((f.name, verifier))
def verify_row_field(obj):
if isinstance(obj, dict):
for f, verifier in verifiers:
verifier(obj.get(f))
elif isinstance(obj, Row) and getattr(obj, "_from_dict", False):
# the order in obj could be different than dataType.fields
for f, verifier in verifiers:
verifier(obj[f])
elif isinstance(obj, (tuple, list)):
if len(obj) != len(verifiers):
raise ValueError(
new_msg("Length of object (%d) does not match with "
"length of fields (%d)" % (len(obj), len(verifiers))))
for v, (_, verifier) in zip(obj, verifiers):
verifier(v)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
for f, verifier in verifiers:
verifier(d.get(f))
else:
raise TypeError(new_msg("RowType can not accept object %r in type %s"
% (obj, type(obj))))
verify_value = verify_row_field
else:
def verify_default(obj):
verify_acceptable_types(obj)
verify_value = verify_default
def verify(obj):
if not verify_nullability(obj):
verify_value(obj)
return verify
def create_arrow_schema(field_names: List[str], field_types: List[DataType]):
"""
    Create an Arrow schema with the specified field names and types.
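    Example (an illustrative sketch; requires ``pyarrow`` to be installed):
    ::

        >>> schema = create_arrow_schema(['id', 'name'],
        ...                              [DataTypes.BIGINT(), DataTypes.STRING()])
        >>> schema.names
        ['id', 'name']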
"""
import pyarrow as pa
fields = [pa.field(field_name, to_arrow_type(field_type), field_type._nullable)
for field_name, field_type in zip(field_names, field_types)]
return pa.schema(fields)
def from_arrow_type(arrow_type, nullable: bool = True) -> DataType:
"""
Convert Arrow type to Flink data type.
"""
from pyarrow import types
if types.is_boolean(arrow_type):
return BooleanType(nullable)
elif types.is_int8(arrow_type):
return TinyIntType(nullable)
elif types.is_int16(arrow_type):
return SmallIntType(nullable)
elif types.is_int32(arrow_type):
return IntType(nullable)
elif types.is_int64(arrow_type):
return BigIntType(nullable)
elif types.is_float32(arrow_type):
return FloatType(nullable)
elif types.is_float64(arrow_type):
return DoubleType(nullable)
elif types.is_decimal(arrow_type):
return DecimalType(arrow_type.precision, arrow_type.scale, nullable)
elif types.is_string(arrow_type):
return VarCharType(0x7fffffff, nullable)
elif types.is_fixed_size_binary(arrow_type):
return BinaryType(arrow_type.byte_width, nullable)
elif types.is_binary(arrow_type):
return VarBinaryType(0x7fffffff, nullable)
elif types.is_date32(arrow_type):
return DateType(nullable)
elif types.is_time32(arrow_type):
if str(arrow_type) == 'time32[s]':
return TimeType(0, nullable)
else:
return TimeType(3, nullable)
elif types.is_time64(arrow_type):
if str(arrow_type) == 'time64[us]':
return TimeType(6, nullable)
else:
return TimeType(9, nullable)
elif types.is_timestamp(arrow_type):
if arrow_type.unit == 's':
return TimestampType(0, nullable)
elif arrow_type.unit == 'ms':
return TimestampType(3, nullable)
elif arrow_type.unit == 'us':
return TimestampType(6, nullable)
else:
return TimestampType(9, nullable)
elif types.is_map(arrow_type):
return MapType(from_arrow_type(arrow_type.key_type),
from_arrow_type(arrow_type.item_type),
nullable)
elif types.is_list(arrow_type):
return ArrayType(from_arrow_type(arrow_type.value_type), nullable)
elif types.is_struct(arrow_type):
if any(types.is_struct(field.type) for field in arrow_type):
raise TypeError("Nested RowType is not supported in conversion from Arrow: " +
str(arrow_type))
return RowType([RowField(field.name, from_arrow_type(field.type, field.nullable))
for field in arrow_type])
elif types.is_null(arrow_type):
return NullType()
else:
raise TypeError("Unsupported data type to convert to Arrow type: " + str(dt))
def to_arrow_type(data_type: DataType):
"""
Converts the specified Flink data type to pyarrow data type.
"""
import pyarrow as pa
if isinstance(data_type, TinyIntType):
return pa.int8()
elif isinstance(data_type, SmallIntType):
return pa.int16()
elif isinstance(data_type, IntType):
return pa.int32()
elif isinstance(data_type, BigIntType):
return pa.int64()
elif isinstance(data_type, BooleanType):
return pa.bool_()
elif isinstance(data_type, FloatType):
return pa.float32()
elif isinstance(data_type, DoubleType):
return pa.float64()
elif isinstance(data_type, (CharType, VarCharType)):
return pa.utf8()
elif isinstance(data_type, BinaryType):
return pa.binary(data_type.length)
elif isinstance(data_type, VarBinaryType):
return pa.binary()
elif isinstance(data_type, DecimalType):
return pa.decimal128(data_type.precision, data_type.scale)
elif isinstance(data_type, DateType):
return pa.date32()
elif isinstance(data_type, TimeType):
if data_type.precision == 0:
return pa.time32('s')
elif 1 <= data_type.precision <= 3:
return pa.time32('ms')
elif 4 <= data_type.precision <= 6:
return pa.time64('us')
else:
return pa.time64('ns')
elif isinstance(data_type, (LocalZonedTimestampType, TimestampType)):
if data_type.precision == 0:
return pa.timestamp('s')
elif 1 <= data_type.precision <= 3:
return pa.timestamp('ms')
elif 4 <= data_type.precision <= 6:
return pa.timestamp('us')
else:
return pa.timestamp('ns')
elif isinstance(data_type, MapType):
return pa.map_(to_arrow_type(data_type.key_type), to_arrow_type(data_type.value_type))
elif isinstance(data_type, ArrayType):
if type(data_type.element_type) in [LocalZonedTimestampType, RowType]:
raise ValueError("%s is not supported to be used as the element type of ArrayType." %
data_type.element_type)
return pa.list_(to_arrow_type(data_type.element_type))
elif isinstance(data_type, RowType):
for field in data_type:
if type(field.data_type) in [LocalZonedTimestampType, RowType]:
raise TypeError("%s is not supported to be used as the field type of RowType" %
field.data_type)
fields = [pa.field(field.name, to_arrow_type(field.data_type), field.data_type._nullable)
for field in data_type]
return pa.struct(fields)
elif isinstance(data_type, NullType):
return pa.null()
else:
raise ValueError("field_type %s is not supported." % data_type)
class DataTypes(object):
"""
A :class:`DataType` can be used to declare input and/or output types of operations.
This class enumerates all supported data types of the Table & SQL API.
"""
@staticmethod
def NULL() -> NullType:
"""
Data type for representing untyped null (None) values. A null type has no
other value except null (None), thus, it can be cast to any nullable type.
This type helps in representing unknown types in API calls that use a null
(None) literal as well as bridging to formats such as JSON or Avro that
define such a type as well.
The null type is an extension to the SQL standard.
        .. note:: `NullType` is not supported yet.
"""
return NullType()
@staticmethod
def CHAR(length: int, nullable: bool = True) -> CharType:
"""
Data type of a fixed-length character string.
:param length: int, the string representation length. It must have a value
between 1 and 2147483647(0x7fffffff) (both inclusive).
:param nullable: boolean, whether the type can be null (None) or not.
"""
return CharType(length, nullable)
@staticmethod
def VARCHAR(length: int, nullable: bool = True) -> VarCharType:
"""
Data type of a variable-length character string.
:param length: int, the maximum string representation length. It must have a
value between 1 and 2147483647(0x7fffffff) (both inclusive).
:param nullable: boolean, whether the type can be null (None) or not.
.. note:: The length limit must be 0x7fffffff(2147483647) currently.
.. seealso:: :func:`~DataTypes.STRING`
"""
return VarCharType(length, nullable)
@staticmethod
def STRING(nullable: bool = True) -> VarCharType:
"""
Data type of a variable-length character string with defined maximum length.
This is a shortcut for ``DataTypes.VARCHAR(2147483647)``.
:param nullable: boolean, whether the type can be null (None) or not.
.. seealso:: :func:`~DataTypes.VARCHAR`
"""
return DataTypes.VARCHAR(0x7fffffff, nullable)
@staticmethod
def BOOLEAN(nullable: bool = True) -> BooleanType:
"""
Data type of a boolean with a (possibly) three-valued logic of
TRUE, FALSE, UNKNOWN.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return BooleanType(nullable)
@staticmethod
def BINARY(length: int, nullable: bool = True) -> BinaryType:
"""
Data type of a fixed-length binary string (=a sequence of bytes).
:param length: int, the number of bytes. It must have a value between
1 and 2147483647(0x7fffffff) (both inclusive).
:param nullable: boolean, whether the type can be null (None) or not.
"""
return BinaryType(length, nullable)
@staticmethod
def VARBINARY(length: int, nullable: bool = True) -> VarBinaryType:
"""
Data type of a variable-length binary string (=a sequence of bytes)
:param length: int, the maximum number of bytes. It must have a value
between 1 and 2147483647(0x7fffffff) (both inclusive).
:param nullable: boolean, whether the type can be null (None) or not.
.. seealso:: :func:`~DataTypes.BYTES`
"""
return VarBinaryType(length, nullable)
@staticmethod
def BYTES(nullable: bool = True) -> VarBinaryType:
"""
Data type of a variable-length binary string (=a sequence of bytes) with
defined maximum length. This is a shortcut for ``DataTypes.VARBINARY(2147483647)``.
:param nullable: boolean, whether the type can be null (None) or not.
.. seealso:: :func:`~DataTypes.VARBINARY`
"""
return DataTypes.VARBINARY(0x7fffffff, nullable)
@staticmethod
def DECIMAL(precision: int, scale: int, nullable: bool = True) -> DecimalType:
"""
Data type of a decimal number with fixed precision and scale.
:param precision: the number of digits in a number. It must have a value
between 1 and 38 (both inclusive).
:param scale: the number of digits on right side of dot. It must have
a value between 0 and precision (both inclusive).
:param nullable: boolean, whether the type can be null (None) or not.
.. note:: The precision must be 38 and the scale must be 18 currently.
"""
return DecimalType(precision, scale, nullable)
@staticmethod
def TINYINT(nullable: bool = True) -> TinyIntType:
"""
Data type of a 1-byte signed integer with values from -128 to 127.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return TinyIntType(nullable)
@staticmethod
def SMALLINT(nullable: bool = True) -> SmallIntType:
"""
Data type of a 2-byte signed integer with values from -32,768 to 32,767.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return SmallIntType(nullable)
@staticmethod
def INT(nullable: bool = True) -> IntType:
"""
        Data type of a 4-byte signed integer with values from -2,147,483,648
to 2,147,483,647.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return IntType(nullable)
@staticmethod
def BIGINT(nullable: bool = True) -> BigIntType:
"""
Data type of an 8-byte signed integer with values from
-9,223,372,036,854,775,808 to 9,223,372,036,854,775,807.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return BigIntType(nullable)
@staticmethod
def FLOAT(nullable: bool = True) -> FloatType:
"""
Data type of a 4-byte single precision floating point number.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return FloatType(nullable)
@staticmethod
def DOUBLE(nullable: bool = True) -> DoubleType:
"""
Data type of an 8-byte double precision floating point number.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return DoubleType(nullable)
@staticmethod
def DATE(nullable: bool = True) -> DateType:
"""
Data type of a date consisting of year-month-day with values ranging
from ``0000-01-01`` to ``9999-12-31``.
Compared to the SQL standard, the range starts at year 0000.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return DateType(nullable)
@staticmethod
def TIME(precision: int = 0, nullable: bool = True) -> TimeType:
"""
Data type of a time WITHOUT time zone.
        An instance consists of hour:minute:second[.fractional] with up to nanosecond
precision and values ranging from ``00:00:00.000000000`` to ``23:59:59.999999999``.
Compared to the SQL standard, leap seconds (23:59:60 and 23:59:61)
are not supported.
:param precision: int, the number of digits of fractional seconds. It must
have a value between 0 and 9 (both inclusive).
:param nullable: boolean, whether the type can be null (None) or not.
.. note:: The precision must be 0 currently.
"""
return TimeType(precision, nullable)
@staticmethod
def TIMESTAMP(precision: int = 6, nullable: bool = True) -> TimestampType:
"""
Data type of a timestamp WITHOUT time zone.
        An instance consists of year-month-day hour:minute:second[.fractional]
with up to nanosecond precision and values ranging from
``0000-01-01 00:00:00.000000000`` to ``9999-12-31 23:59:59.999999999``.
Compared to the SQL standard, leap seconds (``23:59:60`` and ``23:59:61``)
are not supported.
This class does not store or represent a time-zone. Instead, it is a description of
the date, as used for birthdays, combined with the local time as seen on a wall clock.
It cannot represent an instant on the time-line without additional information
such as an offset or time-zone.
:param precision: int, the number of digits of fractional seconds.
It must have a value between 0 and 9 (both inclusive). (default: 6)
:param nullable: boolean, whether the type can be null (None) or not.
.. note:: The precision must be 3 currently.
"""
return TimestampType(precision, nullable)
@staticmethod
def TIMESTAMP_WITH_LOCAL_TIME_ZONE(precision: int = 6, nullable: bool = True) \
-> LocalZonedTimestampType:
"""
Data type of a timestamp WITH LOCAL time zone.
        An instance consists of year-month-day hour:minute:second[.fractional]
with up to nanosecond precision and values ranging from
``0000-01-01 00:00:00.000000000 +14:59`` to ``9999-12-31 23:59:59.999999999 -14:59``.
Compared to the SQL standard, leap seconds (``23:59:60`` and ``23:59:61``)
are not supported.
The value will be stored internally as a long value which stores all date and time
fields, to a precision of nanoseconds, as well as the offset from UTC/Greenwich.
:param precision: int, the number of digits of fractional seconds.
It must have a value between 0 and 9 (both inclusive). (default: 6)
:param nullable: boolean, whether the type can be null (None) or not.
.. note:: `LocalZonedTimestampType` only supports precision of 3 currently.
"""
return LocalZonedTimestampType(precision, nullable)
@staticmethod
def TIMESTAMP_LTZ(precision: int = 6, nullable: bool = True) \
-> LocalZonedTimestampType:
"""
Data type of a timestamp WITH LOCAL time zone.
This is a shortcut for ``DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(precision, nullable)``.
:param precision: int, the number of digits of fractional seconds.
It must have a value between 0 and 9 (both inclusive). (default: 6, only
supports 3 when bridged to DataStream)
:param nullable: boolean, whether the type can be null (None) or not.
.. seealso:: :func:`~DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(precision, nullable)`
"""
return LocalZonedTimestampType(precision, nullable)
@staticmethod
def ARRAY(element_type: DataType, nullable: bool = True) -> ArrayType:
"""
Data type of an array of elements with same subtype.
Compared to the SQL standard, the maximum cardinality of an array cannot
be specified but is fixed at 2147483647(0x7fffffff). Also, any valid
type is supported as a subtype.
:param element_type: :class:`DataType` of each element in the array.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return ArrayType(element_type, nullable)
@staticmethod
def LIST_VIEW(element_type: DataType) -> ListViewType:
"""
Data type of a :class:`pyflink.table.data_view.ListView`.
It can only be used in accumulator type declaration of an Aggregate Function.
:param element_type: :class:`DataType` of each element in the list view.
"""
return ListViewType(element_type)
@staticmethod
def MAP(key_type: DataType, value_type: DataType, nullable: bool = True) -> MapType:
"""
Data type of an associative array that maps keys to values. A map
cannot contain duplicate keys; each key can map to at most one value.
There is no restriction of key types; it is the responsibility of the
user to ensure uniqueness. The map type is an extension to the SQL standard.
:param key_type: :class:`DataType` of the keys in the map.
:param value_type: :class:`DataType` of the values in the map.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return MapType(key_type, value_type, nullable)
@staticmethod
def MAP_VIEW(key_type: DataType, value_type: DataType) -> MapViewType:
"""
        Data type of a :class:`pyflink.table.data_view.MapView`.
It can only be used in accumulator type declaration of an Aggregate Function.
:param key_type: :class:`DataType` of the keys in the map view.
:param value_type: :class:`DataType` of the values in the map view.
"""
return MapViewType(key_type, value_type)
@staticmethod
def MULTISET(element_type: DataType, nullable: bool = True) -> MultisetType:
"""
Data type of a multiset (=bag). Unlike a set, it allows for multiple
instances for each of its elements with a common subtype. Each unique
value is mapped to some multiplicity.
There is no restriction of element types; it is the responsibility
of the user to ensure uniqueness.
:param element_type: :class:`DataType` of each element in the multiset.
:param nullable: boolean, whether the type can be null (None) or not.
"""
return MultisetType(element_type, nullable)
@staticmethod
def ROW(row_fields: List = [], nullable: bool = True) -> RowType:
"""
Data type of a sequence of fields. A field consists of a field name,
field type, and an optional description. The most specific type of
a row of a table is a row type. In this case, each column of the row
corresponds to the field of the row type that has the same ordinal
position as the column.
Compared to the SQL standard, an optional field description simplifies
the handling with complex structures.
:param row_fields: a list of row field types which can be created via
:func:`DataTypes.FIELD`.
:param nullable: boolean, whether the type can be null (None) or not.
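        Example (a minimal sketch of a two-field row type):
        ::

            >>> row_type = DataTypes.ROW([DataTypes.FIELD("id", DataTypes.BIGINT()),
            ...                           DataTypes.FIELD("name", DataTypes.STRING())])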
"""
return RowType(row_fields, nullable)
@staticmethod
def FIELD(name: str, data_type: DataType, description: str = None) -> RowField:
"""
Field definition with field name, data type, and a description.
:param name: string, name of the field.
:param data_type: :class:`DataType` of the field.
:param description: string, description of the field.
"""
return RowField(name, data_type, description)
@staticmethod
def SECOND(precision: int = DayTimeIntervalType.DEFAULT_FRACTIONAL_PRECISION) -> Resolution:
"""
Resolution in seconds and (possibly) fractional seconds.
:param precision: int, the number of digits of fractional seconds. It must have a value
between 0 and 9 (both inclusive), (default: 6).
:return: the specified :class:`Resolution`.
.. note:: the precision must be 3 currently.
.. seealso:: :func:`~pyflink.table.DataTypes.INTERVAL`
"""
return Resolution(Resolution.IntervalUnit.SECOND, precision)
@staticmethod
def MINUTE() -> Resolution:
"""
Resolution in minutes.
:return: the specified :class:`Resolution`.
.. seealso:: :func:`~pyflink.table.DataTypes.INTERVAL`
"""
return Resolution(Resolution.IntervalUnit.MINUTE)
@staticmethod
def HOUR() -> Resolution:
"""
Resolution in hours.
:return: :class:`Resolution`
.. seealso:: :func:`~pyflink.table.DataTypes.INTERVAL`
"""
return Resolution(Resolution.IntervalUnit.HOUR)
@staticmethod
def DAY(precision: int = DayTimeIntervalType.DEFAULT_DAY_PRECISION) -> Resolution:
"""
Resolution in days.
:param precision: int, the number of digits of days. It must have a value between 1 and
6 (both inclusive), (default: 2).
:return: the specified :class:`Resolution`.
.. seealso:: :func:`~pyflink.table.DataTypes.INTERVAL`
"""
return Resolution(Resolution.IntervalUnit.DAY, precision)
@staticmethod
def MONTH() -> Resolution:
"""
Resolution in months.
:return: the specified :class:`Resolution`.
.. seealso:: :func:`~pyflink.table.DataTypes.INTERVAL`
"""
return Resolution(Resolution.IntervalUnit.MONTH)
@staticmethod
def YEAR(precision: int = YearMonthIntervalType.DEFAULT_PRECISION) -> Resolution:
"""
Resolution in years with 2 digits for the number of years by default.
:param precision: the number of digits of years. It must have a value between 1 and
4 (both inclusive), (default 2).
:return: the specified :class:`Resolution`.
.. seealso:: :func:`~pyflink.table.DataTypes.INTERVAL`
"""
return Resolution(Resolution.IntervalUnit.YEAR, precision)
@staticmethod
def INTERVAL(upper_resolution: Resolution, lower_resolution: Resolution = None) \
-> Union[DayTimeIntervalType, YearMonthIntervalType]:
"""
Data type of a temporal interval. There are two types of temporal intervals: day-time
intervals with up to nanosecond granularity or year-month intervals with up to month
granularity.
        An interval of day-time consists of ``+days hours:minutes:seconds.fractional`` with values
ranging from ``-999999 23:59:59.999999999`` to ``+999999 23:59:59.999999999``. The type
must be parameterized to one of the following resolutions: interval of days, interval of
days to hours, interval of days to minutes, interval of days to seconds, interval of hours,
interval of hours to minutes, interval of hours to seconds, interval of minutes,
interval of minutes to seconds, or interval of seconds. The value representation is the
same for all types of resolutions. For example, an interval of seconds of 70 is always
represented in an interval-of-days-to-seconds format (with default precisions):
``+00 00:01:10.000000``.
An interval of year-month consists of ``+years-months`` with values ranging from
``-9999-11`` to ``+9999-11``. The type must be parameterized to one of the following
resolutions: interval of years, interval of years to months, or interval of months. The
value representation is the same for all types of resolutions. For example, an interval
of months of 50 is always represented in an interval-of-years-to-months format (with
default year precision): ``+04-02``.
Examples: ``INTERVAL(DAY(2), SECOND(9))`` for a day-time interval or
``INTERVAL(YEAR(4), MONTH())`` for a year-month interval.
:param upper_resolution: :class:`Resolution`, the upper resolution of the interval.
:param lower_resolution: :class:`Resolution`, the lower resolution of the interval.
.. note:: the upper_resolution must be `MONTH` for `YearMonthIntervalType`, `SECOND` for
`DayTimeIntervalType` and the lower_resolution must be None currently.
.. seealso:: :func:`~pyflink.table.DataTypes.SECOND`
.. seealso:: :func:`~pyflink.table.DataTypes.MINUTE`
.. seealso:: :func:`~pyflink.table.DataTypes.HOUR`
.. seealso:: :func:`~pyflink.table.DataTypes.DAY`
.. seealso:: :func:`~pyflink.table.DataTypes.MONTH`
.. seealso:: :func:`~pyflink.table.DataTypes.YEAR`
"""
return _from_resolution(upper_resolution, lower_resolution)
| 110,432 | 38.230195 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/udf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
import functools
import inspect
from typing import Union, List, Type, Callable, TypeVar, Generic, Iterable
from pyflink.java_gateway import get_gateway
from pyflink.metrics import MetricGroup
from pyflink.table import Expression
from pyflink.table.types import DataType, _to_java_data_type
from pyflink.util import java_utils
__all__ = ['FunctionContext', 'AggregateFunction', 'ScalarFunction', 'TableFunction',
'TableAggregateFunction', 'udf', 'udtf', 'udaf', 'udtaf']
class FunctionContext(object):
"""
Used to obtain global runtime information about the context in which the
user-defined function is executed. The information includes the metric group,
and global job parameters, etc.
"""
def __init__(self, base_metric_group, job_parameters):
self._base_metric_group = base_metric_group
self._job_parameters = job_parameters
def get_metric_group(self) -> MetricGroup:
"""
Returns the metric group for this parallel subtask.
.. versionadded:: 1.11.0
"""
if self._base_metric_group is None:
raise RuntimeError("Metric has not been enabled. You can enable "
"metric with the 'python.metric.enabled' configuration.")
return self._base_metric_group
def get_job_parameter(self, key: str, default_value: str) -> str:
"""
Gets the global job parameter value associated with the given key as a string.
:param key: The key pointing to the associated value.
:param default_value: The default value which is returned in case global job parameter is
null or there is no value associated with the given key.
.. versionadded:: 1.17.0
"""
return self._job_parameters[key] if key in self._job_parameters else default_value
class UserDefinedFunction(abc.ABC):
"""
Base interface for user-defined function.
.. versionadded:: 1.10.0
"""
def open(self, function_context: FunctionContext):
"""
Initialization method for the function. It is called before the actual working methods
and thus suitable for one time setup work.
:param function_context: the context of the function
:type function_context: FunctionContext
"""
pass
def close(self):
"""
Tear-down method for the user code. It is called after the last call to the main
working methods.
"""
pass
def is_deterministic(self) -> bool:
"""
Returns information about the determinism of the function's results.
It returns true if and only if a call to this function is guaranteed to
always return the same result given the same parameters. true is assumed by default.
        If the function is not purely functional (like random(), date(), now()),
        this method must return false.
:return: the determinism of the function's results.
"""
return True
class ScalarFunction(UserDefinedFunction):
"""
    Base interface for user-defined scalar function. A user-defined scalar function maps zero, one,
or multiple scalar values to a new scalar value.
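    Example (a minimal sketch; it assumes the :func:`udf` decorator defined later in this
    module and ``DataTypes`` from ``pyflink.table``):
    ::

        >>> from pyflink.table import DataTypes
        >>> class HashCode(ScalarFunction):
        ...     def eval(self, s: str):
        ...         return hash(s) % 1000
        >>> hash_code = udf(HashCode(), result_type=DataTypes.BIGINT())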
.. versionadded:: 1.10.0
"""
@abc.abstractmethod
def eval(self, *args):
"""
Method which defines the logic of the scalar function.
"""
pass
class TableFunction(UserDefinedFunction):
"""
    Base interface for user-defined table function. A user-defined table function maps zero,
    one, or multiple scalar values to zero, one, or multiple rows.
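    Example (a minimal sketch; it assumes the :func:`udtf` decorator defined later in this
    module and ``DataTypes`` from ``pyflink.table``):
    ::

        >>> from pyflink.table import DataTypes
        >>> class Split(TableFunction):
        ...     def eval(self, line: str):
        ...         for word in line.split(" "):
        ...             yield word, len(word)
        >>> split = udtf(Split(), result_types=[DataTypes.STRING(), DataTypes.INT()])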
.. versionadded:: 1.11.0
"""
@abc.abstractmethod
def eval(self, *args):
"""
Method which defines the logic of the table function.
"""
pass
T = TypeVar('T')
ACC = TypeVar('ACC')
class ImperativeAggregateFunction(UserDefinedFunction, Generic[T, ACC]):
"""
Base interface for user-defined aggregate function and table aggregate function.
This class is used for unified handling of imperative aggregating functions. Concrete
implementations should extend from :class:`~pyflink.table.AggregateFunction` or
:class:`~pyflink.table.TableAggregateFunction`.
.. versionadded:: 1.13.0
"""
@abc.abstractmethod
def create_accumulator(self) -> ACC:
"""
Creates and initializes the accumulator for this AggregateFunction.
:return: the accumulator with the initial value
"""
pass
@abc.abstractmethod
def accumulate(self, accumulator: ACC, *args):
"""
Processes the input values and updates the provided accumulator instance.
:param accumulator: the accumulator which contains the current aggregated results
        :param args: the input value (usually obtained from newly arrived data)
"""
pass
def retract(self, accumulator: ACC, *args):
"""
        Retracts the input values from the accumulator instance. The current design assumes the
inputs are the values that have been previously accumulated.
:param accumulator: the accumulator which contains the current aggregated results
        :param args: the input value (usually obtained from newly arrived data).
"""
raise RuntimeError("Method retract is not implemented")
def merge(self, accumulator: ACC, accumulators):
"""
Merges a group of accumulator instances into one accumulator instance. This method must be
implemented for unbounded session window grouping aggregates and bounded grouping
aggregates.
:param accumulator: the accumulator which will keep the merged aggregate results. It should
be noted that the accumulator may contain the previous aggregated
results. Therefore user should not replace or clean this instance in the
custom merge method.
:param accumulators: a group of accumulators that will be merged.
"""
raise RuntimeError("Method merge is not implemented")
def get_result_type(self) -> Union[DataType, str]:
"""
Returns the DataType of the AggregateFunction's result.
:return: The :class:`~pyflink.table.types.DataType` of the AggregateFunction's result.
"""
raise RuntimeError("Method get_result_type is not implemented")
def get_accumulator_type(self) -> Union[DataType, str]:
"""
Returns the DataType of the AggregateFunction's accumulator.
:return: The :class:`~pyflink.table.types.DataType` of the AggregateFunction's accumulator.
"""
raise RuntimeError("Method get_accumulator_type is not implemented")
class AggregateFunction(ImperativeAggregateFunction):
"""
Base interface for user-defined aggregate function. A user-defined aggregate function maps
scalar values of multiple rows to a new scalar value.
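    Example (a minimal sketch of a weighted average; it assumes the :func:`udaf` decorator
    defined later in this module as well as ``DataTypes`` and ``Row`` from pyflink):
    ::

        >>> from pyflink.common import Row
        >>> from pyflink.table import DataTypes
        >>> class WeightedAvg(AggregateFunction):
        ...     def create_accumulator(self):
        ...         return Row(0, 0)
        ...     def accumulate(self, accumulator, value, weight):
        ...         accumulator[0] += value * weight
        ...         accumulator[1] += weight
        ...     def get_value(self, accumulator):
        ...         return accumulator[0] / accumulator[1] if accumulator[1] else None
        ...     def get_result_type(self):
        ...         return DataTypes.DOUBLE()
        ...     def get_accumulator_type(self):
        ...         return DataTypes.ROW([DataTypes.FIELD("f0", DataTypes.BIGINT()),
        ...                               DataTypes.FIELD("f1", DataTypes.BIGINT())])
        >>> weighted_avg = udaf(WeightedAvg())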
.. versionadded:: 1.12.0
"""
@abc.abstractmethod
def get_value(self, accumulator: ACC) -> T:
"""
Called every time when an aggregation result should be materialized. The returned value
could be either an early and incomplete result (periodically emitted as data arrives) or
the final result of the aggregation.
:param accumulator: the accumulator which contains the current intermediate results
:return: the aggregation result
"""
pass
class TableAggregateFunction(ImperativeAggregateFunction):
"""
Base class for a user-defined table aggregate function. A user-defined table aggregate function
maps scalar values of multiple rows to zero, one, or multiple rows (or structured types). If an
output record consists of only one field, the structured record can be omitted, and a scalar
value can be emitted that will be implicitly wrapped into a row by the runtime.
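    Example (a minimal sketch of a "top 2" table aggregate; it assumes the :func:`udtaf`
    decorator defined later in this module as well as ``DataTypes`` and ``Row`` from pyflink):
    ::

        >>> from pyflink.common import Row
        >>> from pyflink.table import DataTypes
        >>> class Top2(TableAggregateFunction):
        ...     def create_accumulator(self):
        ...         return [None, None]
        ...     def accumulate(self, accumulator, value):
        ...         if accumulator[0] is None or value > accumulator[0]:
        ...             accumulator[1] = accumulator[0]
        ...             accumulator[0] = value
        ...         elif accumulator[1] is None or value > accumulator[1]:
        ...             accumulator[1] = value
        ...     def emit_value(self, accumulator):
        ...         yield Row(accumulator[0])
        ...         if accumulator[1] is not None:
        ...             yield Row(accumulator[1])
        ...     def get_result_type(self):
        ...         return DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT())])
        ...     def get_accumulator_type(self):
        ...         return DataTypes.ARRAY(DataTypes.BIGINT())
        >>> top2 = udtaf(Top2())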
.. versionadded:: 1.13.0
"""
@abc.abstractmethod
def emit_value(self, accumulator: ACC) -> Iterable[T]:
"""
Called every time when an aggregation result should be materialized. The returned value
could be either an early and incomplete result (periodically emitted as data arrives) or the
final result of the aggregation.
:param accumulator: the accumulator which contains the current aggregated results.
:return: multiple aggregated result
"""
pass
class DelegatingScalarFunction(ScalarFunction):
"""
Helper scalar function implementation for lambda expression and python function. It's for
internal use only.
"""
def __init__(self, func):
self.func = func
def eval(self, *args):
return self.func(*args)
class DelegationTableFunction(TableFunction):
"""
Helper table function implementation for lambda expression and python function. It's for
internal use only.
"""
def __init__(self, func):
self.func = func
def eval(self, *args):
return self.func(*args)
class DelegatingPandasAggregateFunction(AggregateFunction):
"""
Helper pandas aggregate function implementation for lambda expression and python function.
It's for internal use only.
"""
def __init__(self, func):
self.func = func
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return []
def accumulate(self, accumulator, *args):
accumulator.append(self.func(*args))
class PandasAggregateFunctionWrapper(object):
"""
Wrapper for Pandas Aggregate function.
"""
def __init__(self, func: AggregateFunction):
self.func = func
def open(self, function_context: FunctionContext):
self.func.open(function_context)
def eval(self, *args):
accumulator = self.func.create_accumulator()
self.func.accumulate(accumulator, *args)
return self.func.get_value(accumulator)
def close(self):
self.func.close()
class UserDefinedFunctionWrapper(object):
"""
Base Wrapper for Python user-defined function. It handles things like converting lambda
functions to user-defined functions, creating the Java user-defined function representation,
etc. It's for internal use only.
"""
def __init__(self, func, input_types, func_type, deterministic=None, name=None):
if inspect.isclass(func) or (
not isinstance(func, UserDefinedFunction) and not callable(func)):
raise TypeError(
"Invalid function: not a function or callable (__call__ is not defined): {0}"
.format(type(func)))
if input_types is not None:
from pyflink.table.types import RowType
if isinstance(input_types, RowType):
input_types = input_types.field_types()
elif isinstance(input_types, (DataType, str)):
input_types = [input_types]
else:
input_types = list(input_types)
for input_type in input_types:
if not isinstance(input_type, (DataType, str)):
raise TypeError(
"Invalid input_type: input_type should be DataType or str but contains {}"
.format(input_type))
self._func = func
self._input_types = input_types
self._name = name or (
func.__name__ if hasattr(func, '__name__') else func.__class__.__name__)
if deterministic is not None and isinstance(func, UserDefinedFunction) and deterministic \
!= func.is_deterministic():
raise ValueError("Inconsistent deterministic: {} and {}".format(
deterministic, func.is_deterministic()))
# default deterministic is True
self._deterministic = deterministic if deterministic is not None else (
func.is_deterministic() if isinstance(func, UserDefinedFunction) else True)
self._func_type = func_type
self._judf_placeholder = None
self._takes_row_as_input = False
def __call__(self, *args) -> Expression:
from pyflink.table import expressions as expr
return expr.call(self, *args)
def alias(self, *alias_names: str):
self._alias_names = alias_names
return self
def _set_takes_row_as_input(self):
self._takes_row_as_input = True
return self
def _java_user_defined_function(self):
if self._judf_placeholder is None:
gateway = get_gateway()
def get_python_function_kind():
JPythonFunctionKind = gateway.jvm.org.apache.flink.table.functions.python. \
PythonFunctionKind
if self._func_type == "general":
return JPythonFunctionKind.GENERAL
elif self._func_type == "pandas":
return JPythonFunctionKind.PANDAS
else:
raise TypeError("Unsupported func_type: %s." % self._func_type)
if self._input_types is not None:
if isinstance(self._input_types[0], str):
j_input_types = java_utils.to_jarray(gateway.jvm.String, self._input_types)
else:
j_input_types = java_utils.to_jarray(
gateway.jvm.DataType, [_to_java_data_type(i) for i in self._input_types])
else:
j_input_types = None
j_function_kind = get_python_function_kind()
func = self._func
if not isinstance(self._func, UserDefinedFunction):
func = self._create_delegate_function()
import cloudpickle
serialized_func = cloudpickle.dumps(func)
self._judf_placeholder = \
self._create_judf(serialized_func, j_input_types, j_function_kind)
return self._judf_placeholder
def _create_delegate_function(self) -> UserDefinedFunction:
pass
def _create_judf(self, serialized_func, j_input_types, j_function_kind):
pass
class UserDefinedScalarFunctionWrapper(UserDefinedFunctionWrapper):
"""
Wrapper for Python user-defined scalar function.
"""
def __init__(self, func, input_types, result_type, func_type, deterministic, name):
super(UserDefinedScalarFunctionWrapper, self).__init__(
func, input_types, func_type, deterministic, name)
if not isinstance(result_type, (DataType, str)):
raise TypeError(
"Invalid returnType: returnType should be DataType or str but is {}".format(
result_type))
self._result_type = result_type
self._judf_placeholder = None
def _create_judf(self, serialized_func, j_input_types, j_function_kind):
gateway = get_gateway()
if isinstance(self._result_type, DataType):
j_result_type = _to_java_data_type(self._result_type)
else:
j_result_type = self._result_type
PythonScalarFunction = gateway.jvm \
.org.apache.flink.table.functions.python.PythonScalarFunction
j_scalar_function = PythonScalarFunction(
self._name,
bytearray(serialized_func),
j_input_types,
j_result_type,
j_function_kind,
self._deterministic,
self._takes_row_as_input,
_get_python_env())
return j_scalar_function
def _create_delegate_function(self) -> UserDefinedFunction:
return DelegatingScalarFunction(self._func)
class UserDefinedTableFunctionWrapper(UserDefinedFunctionWrapper):
"""
Wrapper for Python user-defined table function.
"""
def __init__(self, func, input_types, result_types, deterministic=None, name=None):
super(UserDefinedTableFunctionWrapper, self).__init__(
func, input_types, "general", deterministic, name)
from pyflink.table.types import RowType
if isinstance(result_types, RowType):
# DataTypes.ROW([DataTypes.FIELD("f0", DataTypes.INT()),
# DataTypes.FIELD("f1", DataTypes.BIGINT())])
result_types = result_types.field_types()
elif isinstance(result_types, str):
# ROW<f0 INT, f1 BIGINT>
result_types = result_types
elif isinstance(result_types, DataType):
# DataTypes.INT()
result_types = [result_types]
else:
# [DataTypes.INT(), DataTypes.BIGINT()]
result_types = list(result_types)
for result_type in result_types:
if not isinstance(result_type, (DataType, str)):
raise TypeError(
"Invalid result_type: result_type should be DataType or str but contains {}"
.format(result_type))
self._result_types = result_types
def _create_judf(self, serialized_func, j_input_types, j_function_kind):
gateway = get_gateway()
if isinstance(self._result_types, str):
j_result_type = self._result_types
elif isinstance(self._result_types[0], DataType):
j_result_types = java_utils.to_jarray(
gateway.jvm.DataType, [_to_java_data_type(i) for i in self._result_types])
j_result_type = gateway.jvm.DataTypes.ROW(j_result_types)
else:
j_result_type = 'Row<{0}>'.format(','.join(
['f{0} {1}'.format(i, result_type)
for i, result_type in enumerate(self._result_types)]))
PythonTableFunction = gateway.jvm \
.org.apache.flink.table.functions.python.PythonTableFunction
j_table_function = PythonTableFunction(
self._name,
bytearray(serialized_func),
j_input_types,
j_result_type,
j_function_kind,
self._deterministic,
self._takes_row_as_input,
_get_python_env())
return j_table_function
def _create_delegate_function(self) -> UserDefinedFunction:
return DelegationTableFunction(self._func)
class UserDefinedAggregateFunctionWrapper(UserDefinedFunctionWrapper):
"""
Wrapper for Python user-defined aggregate function or user-defined table aggregate function.
"""
def __init__(self, func, input_types, result_type, accumulator_type, func_type,
deterministic, name, is_table_aggregate=False):
super(UserDefinedAggregateFunctionWrapper, self).__init__(
func, input_types, func_type, deterministic, name)
if accumulator_type is None and func_type == "general":
accumulator_type = func.get_accumulator_type()
if result_type is None:
result_type = func.get_result_type()
if not isinstance(result_type, (DataType, str)):
raise TypeError(
"Invalid returnType: returnType should be DataType or str but is {}"
.format(result_type))
from pyflink.table.types import MapType
if func_type == 'pandas' and isinstance(result_type, MapType):
raise TypeError(
"Invalid returnType: Pandas UDAF doesn't support DataType type {} currently"
.format(result_type))
if accumulator_type is not None and not isinstance(accumulator_type, (DataType, str)):
raise TypeError(
"Invalid accumulator_type: accumulator_type should be DataType or str but is {}"
.format(accumulator_type))
        if (func_type == "general" and
                not (isinstance(result_type, str) and isinstance(accumulator_type, str) or
                     isinstance(result_type, DataType) and isinstance(accumulator_type, DataType))):
            raise TypeError("Invalid types: result_type and accumulator_type should both be "
                            "DataType or both be str.")
self._result_type = result_type
self._accumulator_type = accumulator_type
self._is_table_aggregate = is_table_aggregate
    def _create_judf(self, serialized_func, j_input_types, j_function_kind):
        if self._func_type == "pandas":
            # A vectorized (Pandas) aggregate function implicitly uses an ARRAY of the
            # result type as its accumulator type.
            if isinstance(self._result_type, DataType):
                from pyflink.table.types import DataTypes
                self._accumulator_type = DataTypes.ARRAY(self._result_type)
            else:
                self._accumulator_type = 'ARRAY<{0}>'.format(self._result_type)
if j_input_types is not None:
gateway = get_gateway()
j_input_types = java_utils.to_jarray(
gateway.jvm.DataType, [_to_java_data_type(i) for i in self._input_types])
if isinstance(self._result_type, DataType):
j_result_type = _to_java_data_type(self._result_type)
else:
j_result_type = self._result_type
if isinstance(self._accumulator_type, DataType):
j_accumulator_type = _to_java_data_type(self._accumulator_type)
else:
j_accumulator_type = self._accumulator_type
gateway = get_gateway()
if self._is_table_aggregate:
PythonAggregateFunction = gateway.jvm \
.org.apache.flink.table.functions.python.PythonTableAggregateFunction
else:
PythonAggregateFunction = gateway.jvm \
.org.apache.flink.table.functions.python.PythonAggregateFunction
j_aggregate_function = PythonAggregateFunction(
self._name,
bytearray(serialized_func),
j_input_types,
j_result_type,
j_accumulator_type,
j_function_kind,
self._deterministic,
self._takes_row_as_input,
_get_python_env())
return j_aggregate_function
def _create_delegate_function(self) -> UserDefinedFunction:
assert self._func_type == 'pandas'
return DelegatingPandasAggregateFunction(self._func)
# TODO: support to configure the python execution environment
def _get_python_env():
gateway = get_gateway()
exec_type = gateway.jvm.org.apache.flink.table.functions.python.PythonEnv.ExecType.PROCESS
return gateway.jvm.org.apache.flink.table.functions.python.PythonEnv(exec_type)
def _create_udf(f, input_types, result_type, func_type, deterministic, name):
return UserDefinedScalarFunctionWrapper(
f, input_types, result_type, func_type, deterministic, name)
def _create_udtf(f, input_types, result_types, deterministic, name):
return UserDefinedTableFunctionWrapper(f, input_types, result_types, deterministic, name)
def _create_udaf(f, input_types, result_type, accumulator_type, func_type, deterministic, name):
return UserDefinedAggregateFunctionWrapper(
f, input_types, result_type, accumulator_type, func_type, deterministic, name)
def _create_udtaf(f, input_types, result_type, accumulator_type, func_type, deterministic, name):
return UserDefinedAggregateFunctionWrapper(
f, input_types, result_type, accumulator_type, func_type, deterministic, name, True)
def udf(f: Union[Callable, ScalarFunction, Type] = None,
input_types: Union[List[DataType], DataType, str, List[str]] = None,
result_type: Union[DataType, str] = None,
deterministic: bool = None, name: str = None, func_type: str = "general",
udf_type: str = None) -> Union[UserDefinedScalarFunctionWrapper, Callable]:
"""
Helper method for creating a user-defined function.
Example:
::
>>> add_one = udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT())
>>> # The input_types is optional.
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> # Specify result_type via string.
>>> @udf(result_type='BIGINT')
... def add(i, j):
... return i + j
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())
:param f: lambda function or user-defined function.
:param input_types: optional, the input data types.
:param result_type: the result data type.
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:param name: the function name.
    :param func_type: the type of the python function, available values: 'general', 'pandas'
                      (default: 'general')
    :param udf_type: Deprecated since 1.12.0. Use ``func_type`` instead.
:return: UserDefinedScalarFunctionWrapper or function.
.. versionadded:: 1.10.0
"""
if udf_type:
import warnings
warnings.warn("The param udf_type is deprecated in 1.12. Use func_type instead.")
func_type = udf_type
if func_type not in ('general', 'pandas'):
raise ValueError("The func_type must be one of 'general, pandas', got %s."
% func_type)
# decorator
if f is None:
return functools.partial(_create_udf, input_types=input_types, result_type=result_type,
func_type=func_type, deterministic=deterministic,
name=name)
else:
return _create_udf(f, input_types, result_type, func_type, deterministic, name)
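# A minimal sketch (not part of this module) of how the udf() helper above is typically used.
# The TableEnvironment, element data and the registered name "add_one" are hypothetical.
def _example_scalar_udf_usage():
    from pyflink.table import DataTypes, EnvironmentSettings, TableEnvironment
    from pyflink.table.expressions import col
    t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
    # Wrap a lambda as a scalar function; input_types may be omitted.
    add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
    # Optionally register it so it can also be used from SQL.
    t_env.create_temporary_function("add_one", add_one)
    tab = t_env.from_elements([(1,), (2,)],
                              DataTypes.ROW([DataTypes.FIELD("x", DataTypes.BIGINT())]))
    # Call the wrapper directly in the Table API.
    tab.select(add_one(col("x"))).execute().print()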
def udtf(f: Union[Callable, TableFunction, Type] = None,
input_types: Union[List[DataType], DataType, str, List[str]] = None,
result_types: Union[List[DataType], DataType, str, List[str]] = None,
deterministic: bool = None,
name: str = None) -> Union[UserDefinedTableFunctionWrapper, Callable]:
"""
Helper method for creating a user-defined table function.
Example:
::
>>> # The input_types is optional.
>>> @udtf(result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()])
... def range_emit(s, e):
... for i in range(e):
... yield s, i
>>> # Specify result_types via string
>>> @udtf(result_types=['BIGINT', 'BIGINT'])
... def range_emit(s, e):
... for i in range(e):
... yield s, i
>>> # Specify result_types via row string
>>> @udtf(result_types='Row<a BIGINT, b BIGINT>')
... def range_emit(s, e):
... for i in range(e):
... yield s, i
>>> class MultiEmit(TableFunction):
... def eval(self, i):
... return range(i)
>>> multi_emit = udtf(MultiEmit(), DataTypes.BIGINT(), DataTypes.BIGINT())
:param f: user-defined table function.
:param input_types: optional, the input data types.
:param result_types: the result data types.
:param name: the function name.
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:return: UserDefinedTableFunctionWrapper or function.
.. versionadded:: 1.11.0
"""
# decorator
if f is None:
return functools.partial(_create_udtf, input_types=input_types, result_types=result_types,
deterministic=deterministic, name=name)
else:
return _create_udtf(f, input_types, result_types, deterministic, name)
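# A minimal sketch (not part of this module) of applying a table function with join_lateral.
# The TableEnvironment and element data are hypothetical.
def _example_table_udtf_usage():
    from pyflink.table import DataTypes, EnvironmentSettings, TableEnvironment
    from pyflink.table.expressions import col
    t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
    @udtf(result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()])
    def explode_with_square(n):
        # Emits one row per value in [0, n).
        for i in range(n):
            yield i, i * i
    tab = t_env.from_elements([(3,)],
                              DataTypes.ROW([DataTypes.FIELD("n", DataTypes.BIGINT())]))
    tab.join_lateral(explode_with_square(col("n")).alias("i", "i_squared")) \
        .select(col("n"), col("i"), col("i_squared")) \
        .execute().print()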
def udaf(f: Union[Callable, AggregateFunction, Type] = None,
input_types: Union[List[DataType], DataType, str, List[str]] = None,
result_type: Union[DataType, str] = None, accumulator_type: Union[DataType, str] = None,
deterministic: bool = None, name: str = None,
func_type: str = "general") -> Union[UserDefinedAggregateFunctionWrapper, Callable]:
"""
Helper method for creating a user-defined aggregate function.
Example:
::
>>> # The input_types is optional.
>>> @udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
... def mean_udaf(v):
... return v.mean()
>>> # Specify result_type via string
>>> @udaf(result_type='FLOAT', func_type="pandas")
... def mean_udaf(v):
... return v.mean()
:param f: user-defined aggregate function.
:param input_types: optional, the input data types.
:param result_type: the result data type.
:param accumulator_type: optional, the accumulator data type.
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:param name: the function name.
    :param func_type: the type of the python function, available values: 'general', 'pandas'
                      (default: 'general')
:return: UserDefinedAggregateFunctionWrapper or function.
.. versionadded:: 1.12.0
"""
if func_type not in ('general', 'pandas'):
raise ValueError("The func_type must be one of 'general, pandas', got %s."
% func_type)
# decorator
if f is None:
return functools.partial(_create_udaf, input_types=input_types, result_type=result_type,
accumulator_type=accumulator_type, func_type=func_type,
deterministic=deterministic, name=name)
else:
return _create_udaf(f, input_types, result_type, accumulator_type, func_type,
deterministic, name)
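# A minimal sketch (not part of this module) of a vectorized (Pandas) aggregate function in a
# group-by aggregation. Requires pandas and pyarrow; the element data is hypothetical.
def _example_pandas_udaf_usage():
    from pyflink.table import DataTypes, EnvironmentSettings, TableEnvironment
    from pyflink.table.expressions import col
    t_env = TableEnvironment.create(EnvironmentSettings.in_batch_mode())
    @udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
    def mean_udaf(v):
        # v is a pandas.Series containing all values of the group.
        return v.mean()
    tab = t_env.from_elements(
        [("a", 1.0), ("a", 3.0), ("b", 2.0)],
        DataTypes.ROW([DataTypes.FIELD("key", DataTypes.STRING()),
                       DataTypes.FIELD("value", DataTypes.FLOAT())]))
    tab.group_by(col("key")) \
        .select(col("key"), mean_udaf(col("value"))) \
        .execute().print()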
def udtaf(f: Union[Callable, TableAggregateFunction, Type] = None,
input_types: Union[List[DataType], DataType, str, List[str]] = None,
result_type: Union[DataType, str] = None,
accumulator_type: Union[DataType, str] = None,
deterministic: bool = None, name: str = None,
func_type: str = 'general') -> Union[UserDefinedAggregateFunctionWrapper, Callable]:
"""
Helper method for creating a user-defined table aggregate function.
Example:
::
>>> # The input_types is optional.
>>> class Top2(TableAggregateFunction):
... def emit_value(self, accumulator):
... yield Row(accumulator[0])
... yield Row(accumulator[1])
...
... def create_accumulator(self):
... return [None, None]
...
... def accumulate(self, accumulator, *args):
... if args[0] is not None:
... if accumulator[0] is None or args[0] > accumulator[0]:
... accumulator[1] = accumulator[0]
... accumulator[0] = args[0]
... elif accumulator[1] is None or args[0] > accumulator[1]:
... accumulator[1] = args[0]
...
... def retract(self, accumulator, *args):
... accumulator[0] = accumulator[0] - 1
...
... def merge(self, accumulator, accumulators):
... for other_acc in accumulators:
... self.accumulate(accumulator, other_acc[0])
... self.accumulate(accumulator, other_acc[1])
...
... def get_accumulator_type(self):
... return 'ARRAY<BIGINT>'
...
... def get_result_type(self):
... return 'ROW<a BIGINT>'
>>> top2 = udtaf(Top2())
:param f: user-defined table aggregate function.
:param input_types: optional, the input data types.
:param result_type: the result data type.
:param accumulator_type: optional, the accumulator data type.
:param deterministic: the determinism of the function's results. True if and only if a call to
this function is guaranteed to always return the same result given the
same parameters. (default True)
:param name: the function name.
    :param func_type: the type of the python function, available value: 'general'
                      (default: 'general')
:return: UserDefinedAggregateFunctionWrapper or function.
.. versionadded:: 1.13.0
"""
if func_type != 'general':
raise ValueError("The func_type must be 'general', got %s."
% func_type)
if f is None:
return functools.partial(_create_udtaf, input_types=input_types, result_type=result_type,
accumulator_type=accumulator_type, func_type=func_type,
deterministic=deterministic, name=name)
else:
return _create_udtaf(f, input_types, result_type, accumulator_type, func_type,
deterministic, name)
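# A minimal sketch (not part of this module) of applying a table aggregate function such as the
# Top2 class shown in the udtaf() docstring. The "orders" table, its columns and the previously
# created `top2` wrapper are hypothetical.
def _example_udtaf_usage(t_env, top2):
    from pyflink.table.expressions import col
    tab = t_env.from_path("orders")  # assumed to have columns (product, amount)
    tab.group_by(col("product")) \
        .flat_aggregate(top2(col("amount")).alias("top_amount")) \
        .select(col("product"), col("top_amount")) \
        .execute().print()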
| 34,227 | 38.939323 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/table_schema.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import List, Optional, Union
from pyflink.java_gateway import get_gateway
from pyflink.table.types import DataType, RowType, _to_java_data_type, _from_java_data_type
from pyflink.util.java_utils import to_jarray
__all__ = ['TableSchema']
class TableSchema(object):
"""
A table schema that represents a table's structure with field names and data types.
"""
def __init__(self, field_names: List[str] = None, data_types: List[DataType] = None,
j_table_schema=None):
if j_table_schema is None:
gateway = get_gateway()
j_field_names = to_jarray(gateway.jvm.String, field_names)
j_data_types = to_jarray(gateway.jvm.DataType,
[_to_java_data_type(item) for item in data_types])
self._j_table_schema = gateway.jvm.TableSchema.builder()\
.fields(j_field_names, j_data_types).build()
else:
self._j_table_schema = j_table_schema
def copy(self) -> 'TableSchema':
"""
Returns a deep copy of the table schema.
:return: A deep copy of the table schema.
"""
return TableSchema(j_table_schema=self._j_table_schema.copy())
def get_field_data_types(self) -> List[DataType]:
"""
Returns all field data types as a list.
:return: A list of all field data types.
"""
return [_from_java_data_type(item) for item in self._j_table_schema.getFieldDataTypes()]
def get_field_data_type(self, field: Union[int, str]) -> Optional[DataType]:
"""
Returns the specified data type for the given field index or field name.
:param field: The index of the field or the name of the field.
:return: The data type of the specified field.
"""
if not isinstance(field, (int, str)):
raise TypeError("Expected field index or field name, got %s" % type(field))
optional_result = self._j_table_schema.getFieldDataType(field)
if optional_result.isPresent():
return _from_java_data_type(optional_result.get())
else:
return None
def get_field_count(self) -> int:
"""
Returns the number of fields.
:return: The number of fields.
"""
return self._j_table_schema.getFieldCount()
def get_field_names(self) -> List[str]:
"""
Returns all field names as a list.
:return: The list of all field names.
"""
return list(self._j_table_schema.getFieldNames())
def get_field_name(self, field_index: int) -> Optional[str]:
"""
Returns the specified name for the given field index.
:param field_index: The index of the field.
:return: The field name.
"""
optional_result = self._j_table_schema.getFieldName(field_index)
if optional_result.isPresent():
return optional_result.get()
else:
return None
def to_row_data_type(self) -> RowType:
"""
Converts a table schema into a (nested) data type describing a
:func:`pyflink.table.types.DataTypes.ROW`.
:return: The row data type.
"""
return _from_java_data_type(self._j_table_schema.toRowDataType())
def __repr__(self):
return self._j_table_schema.toString()
def __eq__(self, other):
return isinstance(other, self.__class__) and self._j_table_schema == other._j_table_schema
def __hash__(self):
return self._j_table_schema.hashCode()
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def builder(cls):
return TableSchema.Builder()
class Builder(object):
"""
Builder for creating a :class:`TableSchema`.
"""
def __init__(self):
self._field_names = []
self._field_data_types = []
def field(self, name: str, data_type: DataType) -> 'TableSchema.Builder':
"""
Add a field with name and data type.
The call order of this method determines the order of fields in the schema.
:param name: The field name.
:param data_type: The field data type.
:return: This object.
"""
assert name is not None
assert data_type is not None
self._field_names.append(name)
self._field_data_types.append(data_type)
return self
def build(self) -> 'TableSchema':
"""
Returns a :class:`TableSchema` instance.
:return: The :class:`TableSchema` instance.
"""
return TableSchema(self._field_names, self._field_data_types)
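# A minimal sketch (not part of this module) of building a TableSchema field by field with the
# builder above and inspecting it. It needs a running Py4J gateway, since TableSchema wraps a
# Java object.
def _example_table_schema_builder():
    from pyflink.table.types import DataTypes
    schema = TableSchema.builder() \
        .field("id", DataTypes.BIGINT()) \
        .field("name", DataTypes.STRING()) \
        .build()
    assert schema.get_field_names() == ["id", "name"]
    assert schema.get_field_count() == 2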
| 5,727 | 34.57764 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/table_result.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Optional
from py4j.java_gateway import get_method
from pyflink.common.types import RowKind
from pyflink.common import Row
from pyflink.common.job_client import JobClient
from pyflink.java_gateway import get_gateway
from pyflink.table.result_kind import ResultKind
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import _from_java_data_type
from pyflink.table.utils import pickled_bytes_to_python_converter
__all__ = ['TableResult', 'CloseableIterator']
class TableResult(object):
"""
A :class:`~pyflink.table.TableResult` is the representation of the statement execution result.
.. versionadded:: 1.11.0
"""
def __init__(self, j_table_result):
self._j_table_result = j_table_result
def get_job_client(self) -> Optional[JobClient]:
"""
For DML and DQL statement, return the JobClient which associates the submitted Flink job.
For other statements (e.g. DDL, DCL) return empty.
:return: The job client, optional.
:rtype: pyflink.common.JobClient
.. versionadded:: 1.11.0
"""
job_client = self._j_table_result.getJobClient()
if job_client.isPresent():
return JobClient(job_client.get())
else:
return None
def wait(self, timeout_ms: int = None):
"""
Wait if necessary for at most the given time (milliseconds) for the data to be ready.
For a select operation, this method will wait until the first row can be accessed locally.
For an insert operation, this method will wait for the job to finish,
because the result contains only one row.
For other operations, this method will return immediately,
because the result is already available locally.
.. versionadded:: 1.12.0
"""
if timeout_ms:
TimeUnit = get_gateway().jvm.java.util.concurrent.TimeUnit
get_method(self._j_table_result, "await")(timeout_ms, TimeUnit.MILLISECONDS)
else:
get_method(self._j_table_result, "await")()
def get_table_schema(self) -> TableSchema:
"""
Get the schema of result.
The schema of DDL, USE, EXPLAIN:
::
+-------------+-------------+----------+
| column name | column type | comments |
+-------------+-------------+----------+
| result | STRING | |
+-------------+-------------+----------+
The schema of SHOW:
::
+---------------+-------------+----------+
| column name | column type | comments |
+---------------+-------------+----------+
| <object name> | STRING | |
+---------------+-------------+----------+
The column name of `SHOW CATALOGS` is "catalog name",
the column name of `SHOW DATABASES` is "database name",
the column name of `SHOW TABLES` is "table name",
the column name of `SHOW VIEWS` is "view name",
the column name of `SHOW FUNCTIONS` is "function name".
The schema of DESCRIBE:
::
+------------------+-------------+-------------------------------------------------+
| column name | column type | comments |
+------------------+-------------+-------------------------------------------------+
| name | STRING | field name |
+------------------+-------------+-------------------------------------------------+
| type | STRING | field type expressed as a String |
+------------------+-------------+-------------------------------------------------+
| null | BOOLEAN | field nullability: true if a field is nullable, |
| | | else false |
+------------------+-------------+-------------------------------------------------+
| key | BOOLEAN | key constraint: 'PRI' for primary keys, |
| | | 'UNQ' for unique keys, else null |
+------------------+-------------+-------------------------------------------------+
| computed column | STRING | computed column: string expression |
| | | if a field is computed column, else null |
+------------------+-------------+-------------------------------------------------+
| watermark | STRING | watermark: string expression if a field is |
| | | watermark, else null |
+------------------+-------------+-------------------------------------------------+
The schema of INSERT: (one column per one sink)
::
+----------------------------+-------------+-----------------------+
| column name | column type | comments |
+----------------------------+-------------+-----------------------+
| (name of the insert table) | BIGINT | the insert table name |
+----------------------------+-------------+-----------------------+
The schema of SELECT is the selected field names and types.
:return: The schema of result.
:rtype: pyflink.table.TableSchema
.. versionadded:: 1.11.0
"""
return TableSchema(j_table_schema=self._get_java_table_schema())
def get_result_kind(self) -> ResultKind:
"""
Return the ResultKind which represents the result type.
For DDL operation and USE operation, the result kind is always SUCCESS.
For other operations, the result kind is always SUCCESS_WITH_CONTENT.
:return: The result kind.
.. versionadded:: 1.11.0
"""
return ResultKind._from_j_result_kind(self._j_table_result.getResultKind())
def collect(self) -> 'CloseableIterator':
"""
Get the result contents as a closeable row iterator.
        Note:
        For a SELECT operation, the job will not finish until all result data has been
        collected, so actively close the job through the CloseableIterator#close method to
        avoid resource leaks. Calling CloseableIterator#close cancels the job and releases
        the related resources.
        For a DML operation, Flink does not yet support retrieving the real affected row
        count, so the affected row count is always -1 (unknown) for every sink and is not
        returned until the job is finished. Calling CloseableIterator#close cancels the job.
        For other operations, no Flink job is submitted (get_job_client() is always empty)
        and the result is bounded; calling CloseableIterator#close does nothing.
        Recommended code to call the CloseableIterator#close method looks like:
        >>> table_result = t_env.execute_sql("select ...")
        >>> with table_result.collect() as results:
        >>>    for result in results:
        >>>        ...
        In order to fetch results to local, you can call either collect() or print(), but they
        must not both be called on the same TableResult instance.
:return: A CloseableIterator.
.. versionadded:: 1.12.0
"""
field_data_types = self._get_java_table_schema().getFieldDataTypes()
j_iter = self._j_table_result.collect()
return CloseableIterator(j_iter, field_data_types)
def print(self):
"""
Print the result contents as tableau form to client console.
This method has slightly different behaviors under different checkpointing settings.
- For batch jobs or streaming jobs without checkpointing,
this method has neither exactly-once nor at-least-once guarantee.
Query results are immediately accessible by the clients once they're produced,
but exceptions will be thrown when the job fails and restarts.
- For streaming jobs with exactly-once checkpointing,
this method guarantees an end-to-end exactly-once record delivery.
A result will be accessible by clients only after its corresponding checkpoint
completes.
- For streaming jobs with at-least-once checkpointing,
this method guarantees an end-to-end at-least-once record delivery.
Query results are immediately accessible by the clients once they're produced,
but it is possible for the same result to be delivered multiple times.
.. versionadded:: 1.11.0
"""
self._j_table_result.print()
def _get_java_table_schema(self):
TableSchema = get_gateway().jvm.org.apache.flink.table.api.TableSchema
return TableSchema.fromResolvedSchema(self._j_table_result.getResolvedSchema())
class CloseableIterator(object):
"""
Representing an Iterator that is also auto closeable.
"""
def __init__(self, j_closeable_iterator, field_data_types):
self._j_closeable_iterator = j_closeable_iterator
self._j_field_data_types = field_data_types
self._data_types = [_from_java_data_type(j_field_data_type)
for j_field_data_type in self._j_field_data_types]
def __iter__(self):
return self
def __next__(self):
if not self._j_closeable_iterator.hasNext():
raise StopIteration("No more data.")
gateway = get_gateway()
pickle_bytes = gateway.jvm.PythonBridgeUtils. \
getPickledBytesFromRow(self._j_closeable_iterator.next(),
self._j_field_data_types)
row_kind = RowKind(int.from_bytes(pickle_bytes[0], byteorder='big', signed=False))
pickle_bytes = list(pickle_bytes[1:])
field_data = zip(pickle_bytes, self._data_types)
fields = []
for data, field_type in field_data:
if len(data) == 0:
fields.append(None)
else:
fields.append(pickled_bytes_to_python_converter(data, field_type))
result_row = Row(*fields)
result_row.set_row_kind(row_kind)
return result_row
def next(self):
return self.__next__()
def close(self):
self._j_closeable_iterator.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
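# A minimal sketch (not part of this module) of typical TableResult handling. The table
# environment, "some_source" and "some_sink" are hypothetical.
def _example_table_result_usage(t_env):
    # DQL: iterate lazily; the context manager closes the iterator and cancels the job on exit.
    with t_env.execute_sql("SELECT id, name FROM some_source").collect() as results:
        for row in results:
            print(row)
    # DML: wait() blocks until the insert job finishes (optionally bounded by a timeout in
    # milliseconds).
    insert_result = t_env.execute_sql("INSERT INTO some_sink SELECT id, name FROM some_source")
    insert_result.wait(60000)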
| 11,783 | 42.644444 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/expressions.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Union
from pyflink import add_version_doc
from pyflink.java_gateway import get_gateway
from pyflink.table.expression import Expression, _get_java_expression, TimePointUnit, JsonOnNull
from pyflink.table.types import _to_java_data_type, DataType
from pyflink.table.udf import UserDefinedFunctionWrapper
from pyflink.util.java_utils import to_jarray, load_java_class
__all__ = ['if_then_else', 'lit', 'col', 'range_', 'and_', 'or_', 'not_', 'UNBOUNDED_ROW',
'UNBOUNDED_RANGE', 'CURRENT_ROW', 'CURRENT_RANGE', 'current_database',
'current_date', 'current_time', 'current_timestamp',
'current_watermark', 'local_time', 'local_timestamp',
'temporal_overlaps', 'date_format', 'timestamp_diff', 'array', 'row', 'map_',
'row_interval', 'pi', 'e', 'rand', 'rand_integer', 'atan2', 'negative', 'concat',
'concat_ws', 'uuid', 'null_of', 'log', 'with_columns', 'without_columns', 'json_string',
'json_object', 'json_object_agg', 'json_array', 'json_array_agg', 'call', 'call_sql',
'source_watermark', 'to_timestamp_ltz', 'from_unixtime', 'to_date', 'to_timestamp',
'convert_tz', 'unix_timestamp']
def _leaf_op(op_name: str) -> Expression:
gateway = get_gateway()
return Expression(getattr(gateway.jvm.Expressions, op_name)())
def _unary_op(op_name: str, arg) -> Expression:
gateway = get_gateway()
return Expression(getattr(gateway.jvm.Expressions, op_name)(_get_java_expression(arg)))
def _binary_op(op_name: str, first, second) -> Expression:
gateway = get_gateway()
return Expression(getattr(gateway.jvm.Expressions, op_name)(
_get_java_expression(first),
_get_java_expression(second)))
def _ternary_op(op_name: str, first, second, third) -> Expression:
gateway = get_gateway()
return Expression(getattr(gateway.jvm.Expressions, op_name)(
_get_java_expression(first),
_get_java_expression(second),
_get_java_expression(third)))
def _quaternion_op(op_name: str, first, second, third, forth) -> Expression:
gateway = get_gateway()
return Expression(getattr(gateway.jvm.Expressions, op_name)(
_get_java_expression(first),
_get_java_expression(second),
_get_java_expression(third),
_get_java_expression(forth)))
def _varargs_op(op_name: str, *args):
gateway = get_gateway()
return Expression(
getattr(gateway.jvm.Expressions, op_name)(*[_get_java_expression(arg) for arg in args]))
def _add_version_doc():
from inspect import getmembers, isfunction
from pyflink.table import expressions
for o in getmembers(expressions):
if isfunction(o[1]) and not o[0].startswith('_'):
add_version_doc(o[1], "1.12.0")
def col(name: str) -> Expression:
"""
Creates an expression which refers to a table's column.
Example:
::
>>> tab.select(col("key"), col("value"))
:param name: the field name to refer to
"""
return _unary_op("col", name)
def lit(v, data_type: DataType = None) -> Expression:
"""
Creates a SQL literal.
The data type is derived from the object's class and its value. For example, `lit(12)` leads
to `INT`, `lit("abc")` leads to `CHAR(3)`.
Example:
::
>>> tab.select(col("key"), lit("abc"))
"""
if data_type is None:
return _unary_op("lit", v)
else:
return _binary_op("lit", v, _to_java_data_type(data_type))
def range_(start: Union[str, int], end: Union[str, int]) -> Expression:
"""
Indicates a range from 'start' to 'end', which can be used in columns selection.
Example:
::
>>> tab.select(with_columns(range_('b', 'c')))
.. seealso:: :func:`~pyflink.table.expressions.with_columns`
"""
return _binary_op("range", start, end)
def and_(predicate0: Union[bool, Expression[bool]],
predicate1: Union[bool, Expression[bool]],
*predicates: Union[bool, Expression[bool]]) -> Expression[bool]:
"""
Boolean AND in three-valued logic.
"""
gateway = get_gateway()
predicates = to_jarray(gateway.jvm.Object, [_get_java_expression(p) for p in predicates])
return _ternary_op("and", predicate0, predicate1, predicates)
def or_(predicate0: Union[bool, Expression[bool]],
predicate1: Union[bool, Expression[bool]],
*predicates: Union[bool, Expression[bool]]) -> Expression[bool]:
"""
Boolean OR in three-valued logic.
"""
gateway = get_gateway()
predicates = to_jarray(gateway.jvm.Object, [_get_java_expression(p) for p in predicates])
return _ternary_op("or", predicate0, predicate1, predicates)
def not_(expression: Expression[bool]) -> Expression[bool]:
"""
Inverts a given boolean expression.
This method supports a three-valued logic by preserving `NULL`. This means if the input
expression is `NULL`, the result will also be `NULL`.
The resulting type is nullable if and only if the input type is nullable.
Examples:
::
>>> not_(lit(True)) # False
>>> not_(lit(False)) # True
>>> not_(lit(None, DataTypes.BOOLEAN())) # None
"""
return _unary_op("not", expression)
"""
Offset constant to be used in the `preceding` clause of unbounded
:class:`~pyflink.table.window.Over`. Use this constant for a time interval.
Unbounded over windows start with the first row of a partition.
.. versionadded:: 1.12.0
"""
UNBOUNDED_ROW = Expression("UNBOUNDED_ROW") # type: Expression
"""
Offset constant to be used in the `preceding` clause of unbounded
:class:`~pyflink.table.window.Over` windows. Use this constant for a row-count interval.
Unbounded over windows start with the first row of a partition.
.. versionadded:: 1.12.0
"""
UNBOUNDED_RANGE = Expression("UNBOUNDED_RANGE") # type: Expression
"""
Offset constant to be used in the `following` clause of :class:`~pyflink.table.window.Over` windows.
Use this for setting the upper bound of the window to the current row.
.. versionadded:: 1.12.0
"""
CURRENT_ROW = Expression("CURRENT_ROW") # type: Expression
"""
Offset constant to be used in the `following` clause of :class:`~pyflink.table.window.Over` windows.
Use this for setting the upper bound of the window to the sort key of the current row, i.e.,
all rows with the same sort key as the current row are included in the window.
.. versionadded:: 1.12.0
"""
CURRENT_RANGE = Expression("CURRENT_RANGE") # type: Expression
def current_database() -> Expression:
"""
Returns the current database
"""
return _leaf_op("currentDatabase")
def current_date() -> Expression:
"""
Returns the current SQL date in local time zone.
"""
return _leaf_op("currentDate")
def current_time() -> Expression:
"""
Returns the current SQL time in local time zone.
"""
return _leaf_op("currentTime")
def current_timestamp() -> Expression:
"""
Returns the current SQL timestamp in local time zone,
the return type of this expression is TIMESTAMP_LTZ.
"""
return _leaf_op("currentTimestamp")
def current_watermark(rowtimeAttribute) -> Expression:
"""
Returns the current watermark for the given rowtime attribute, or NULL if no common watermark of
all upstream operations is available at the current operation in the pipeline.
The function returns the watermark with the same type as the rowtime attribute, but with an
adjusted precision of 3. For example, if the rowtime attribute is `TIMESTAMP_LTZ(9)`, the
function will return `TIMESTAMP_LTZ(3)`.
If no watermark has been emitted yet, the function will return `NULL`. Users must take care of
this when comparing against it, e.g. in order to filter out late data you can use
::
WHERE CURRENT_WATERMARK(ts) IS NULL OR ts > CURRENT_WATERMARK(ts)
"""
return _unary_op("currentWatermark", rowtimeAttribute)
def local_time() -> Expression:
"""
Returns the current SQL time in local time zone.
"""
return _leaf_op("localTime")
def local_timestamp() -> Expression:
"""
Returns the current SQL timestamp in local time zone,
    the return type of this expression is TIMESTAMP.
"""
return _leaf_op("localTimestamp")
def to_date(date_str: Union[str, Expression[str]],
format: Union[str, Expression[str]] = None) -> Expression:
"""
Converts the date string with the given format (by default 'yyyy-MM-dd') to a date.
:param date_str: The date string
:param format: The format of the string
:return: The date value with DATE type.
"""
if format is None:
return _unary_op("toDate", date_str)
else:
return _binary_op("toDate", date_str, format)
def to_timestamp(timestamp_str: Union[str, Expression[str]],
format: Union[str, Expression[str]] = None) -> Expression:
"""
Converts the date time string with the given format (by default: 'yyyy-MM-dd HH:mm:ss')
under the 'UTC+0' time zone to a timestamp.
:param timestamp_str: The date time string
:param format: The format of the string
:return: The date value with TIMESTAMP type.
"""
if format is None:
return _unary_op("toTimestamp", timestamp_str)
else:
return _binary_op("toTimestamp", timestamp_str, format)
def to_timestamp_ltz(numeric_epoch_time, precision) -> Expression:
"""
Converts a numeric type epoch time to TIMESTAMP_LTZ.
    The supported precision is 0 or 3:
    0 means the numeric_epoch_time is in seconds.
    3 means the numeric_epoch_time is in milliseconds.
:param numeric_epoch_time: The epoch time with numeric type
:param precision: The precision to indicate the epoch time is in second or millisecond
:return: The timestamp value with TIMESTAMP_LTZ type.
"""
return _binary_op("toTimestampLtz", numeric_epoch_time, precision)
def temporal_overlaps(left_time_point,
left_temporal,
right_time_point,
right_temporal) -> Expression:
"""
Determines whether two anchored time intervals overlap. Time point and temporal are
transformed into a range defined by two time points (start, end). The function
evaluates `left_end >= right_start && right_end >= left_start`.
e.g.
temporal_overlaps(
lit("2:55:00").to_time,
lit(1).hours,
lit("3:30:00").to_time,
lit(2).hours) leads to true.
:param left_time_point: The left time point
:param left_temporal: The time interval from the left time point
:param right_time_point: The right time point
:param right_temporal: The time interval from the right time point
:return: An expression which indicates whether two anchored time intervals overlap.
"""
return _quaternion_op("temporalOverlaps",
left_time_point, left_temporal, right_time_point, right_temporal)
def date_format(timestamp, format) -> Expression:
"""
Formats a timestamp as a string using a specified format.
The format must be compatible with MySQL's date formatting syntax as used by the
date_parse function.
For example `date_format(col("time"), "%Y, %d %M")` results in strings formatted as
"2017, 05 May".
:param timestamp: The timestamp to format as string.
:param format: The format of the string.
:return: The formatted timestamp as string.
"""
return _binary_op("dateFormat", timestamp, format)
def timestamp_diff(time_point_unit: TimePointUnit, time_point1, time_point2) -> Expression:
"""
Returns the (signed) number of :class:`~pyflink.table.expression.TimePointUnit` between
time_point1 and time_point2.
For example,
`timestamp_diff(TimePointUnit.DAY, lit("2016-06-15").to_date, lit("2016-06-18").to_date`
leads to 3.
:param time_point_unit: The unit to compute diff.
:param time_point1: The first point in time.
:param time_point2: The second point in time.
:return: The number of intervals as integer value.
"""
return _ternary_op("timestampDiff", time_point_unit._to_j_time_point_unit(),
time_point1, time_point2)
def convert_tz(date_str: Union[str, Expression[str]],
tz_from: Union[str, Expression[str]],
tz_to: Union[str, Expression[str]]) -> Expression:
"""
Converts a datetime string date_str (with default ISO timestamp format 'yyyy-MM-dd HH:mm:ss')
from time zone tz_from to time zone tz_to. The format of time zone should be either an
abbreviation such as "PST", a full name such as "America/Los_Angeles", or a custom ID such as
"GMT-08:00". E.g., convert_tz('1970-01-01 00:00:00', 'UTC', 'America/Los_Angeles') returns
'1969-12-31 16:00:00'.
Example:
::
>>> tab.select(convert_tz(col('a'), 'PST', 'UTC'))
:param date_str: the date time string
:param tz_from: the original time zone
:param tz_to: the target time zone
:return: The formatted timestamp as string.
"""
return _ternary_op("convertTz", date_str, tz_from, tz_to)
def from_unixtime(unixtime, format=None) -> Expression:
"""
    Converts a unix timestamp (seconds since '1970-01-01 00:00:00' UTC) to a datetime string in
    the given format. The default format is "yyyy-MM-dd HH:mm:ss".
"""
if format is None:
return _unary_op("fromUnixtime", unixtime)
else:
return _binary_op("fromUnixtime", unixtime, format)
def unix_timestamp(date_str: Union[str, Expression[str]] = None,
format: Union[str, Expression[str]] = None) -> Expression:
"""
    Gets the current unix timestamp in seconds if no arguments are specified.
    This function is not deterministic, which means the value is recalculated for each record.
    If the date time string date_str is specified, it converts the given date time string
    in the specified format (by default: yyyy-MM-dd HH:mm:ss) to a unix timestamp
    (in seconds), using the time zone specified in the table config.
"""
if date_str is None:
return _leaf_op("unixTimestamp")
elif format is None:
return _unary_op("unixTimestamp", date_str)
else:
return _binary_op("unixTimestamp", date_str, format)
def array(head, *tail) -> Expression:
"""
Creates an array of literals.
Example:
::
>>> tab.select(array(1, 2, 3))
"""
gateway = get_gateway()
tail = to_jarray(gateway.jvm.Object, [_get_java_expression(t) for t in tail])
return _binary_op("array", head, tail)
def row(head, *tail) -> Expression:
"""
Creates a row of expressions.
Example:
::
>>> tab.select(row("key1", 1))
"""
gateway = get_gateway()
tail = to_jarray(gateway.jvm.Object, [_get_java_expression(t) for t in tail])
return _binary_op("row", head, tail)
def map_(key, value, *tail) -> Expression:
"""
Creates a map of expressions.
Example:
::
>>> tab.select(
>>> map_(
>>> "key1", 1,
>>> "key2", 2,
>>> "key3", 3
>>> ))
.. note::
keys and values should have the same types for all entries.
"""
gateway = get_gateway()
tail = to_jarray(gateway.jvm.Object, [_get_java_expression(t) for t in tail])
return _ternary_op("map", key, value, tail)
def map_from_arrays(key, value) -> Expression:
"""
Creates a map from an array of keys and an array of values.
Example:
::
>>> tab.select(
>>> map_from_arrays(
>>> array("key1", "key2", "key3"),
>>> array(1, 2, 3)
>>> ))
.. note::
both arrays should have the same length.
"""
return _binary_op("mapFromArrays", key, value)
def row_interval(rows: int) -> Expression:
"""
Creates an interval of rows.
Example:
::
>>> tab.window(Over
>>> .partition_by(col('a'))
>>> .order_by(col('proctime'))
>>> .preceding(row_interval(4))
>>> .following(CURRENT_ROW)
>>> .alias('w'))
:param rows: the number of rows
"""
return _unary_op("rowInterval", rows)
def pi() -> Expression[float]:
"""
Returns a value that is closer than any other value to `pi`.
"""
return _leaf_op("pi")
def e() -> Expression[float]:
"""
Returns a value that is closer than any other value to `e`.
"""
return _leaf_op("e")
def rand(seed: Union[int, Expression[int]] = None) -> Expression[float]:
"""
    Returns a pseudorandom double value between 0.0 (inclusive) and 1.0 (exclusive), optionally
    initialized with the given seed. Two rand() calls will return identical sequences of numbers
    if they have the same initial seed.
"""
if seed is None:
return _leaf_op("rand")
else:
return _unary_op("rand", seed)
def rand_integer(bound: Union[int, Expression[int]],
seed: Union[int, Expression[int]] = None) -> Expression:
"""
    Returns a pseudorandom integer value between 0 (inclusive) and the specified value
    (exclusive), optionally initialized with the given seed. Two rand_integer() calls will return
    identical sequences of numbers if they have the same initial seed and the same bound.
"""
if seed is None:
return _unary_op("randInteger", bound)
else:
return _binary_op("randInteger", seed, bound)
def atan2(y, x) -> Expression[float]:
"""
Calculates the arc tangent of a given coordinate.
"""
return _binary_op("atan2", y, x)
def negative(v) -> Expression:
"""
Returns negative numeric.
"""
return _unary_op("negative", v)
def concat(first: Union[str, Expression[str]],
*others: Union[str, Expression[str]]) -> Expression[str]:
"""
Returns the string that results from concatenating the arguments.
Returns NULL if any argument is NULL.
"""
gateway = get_gateway()
return _binary_op("concat",
first,
to_jarray(gateway.jvm.Object,
[_get_java_expression(other) for other in others]))
def concat_ws(separator: Union[str, Expression[str]],
first: Union[str, Expression[str]],
*others: Union[str, Expression[str]]) -> Expression[str]:
"""
Returns the string that results from concatenating the arguments and separator.
Returns NULL If the separator is NULL.
.. note::
this function does not skip empty strings. However, it does skip any NULL
values after the separator argument.
"""
gateway = get_gateway()
return _ternary_op("concatWs",
separator,
first,
to_jarray(gateway.jvm.Object,
[_get_java_expression(other) for other in others]))
def uuid() -> Expression[str]:
"""
    Returns a UUID (Universally Unique Identifier) string (e.g.,
    "3d3c68f7-f608-473f-b60c-b0c44ad4cc4e") according to RFC 4122 type 4 (pseudo-randomly
    generated). The UUID is generated using a cryptographically strong pseudo random number
    generator.
"""
return _leaf_op("uuid")
def null_of(data_type: DataType) -> Expression:
"""
Returns a null literal value of a given data type.
"""
return _unary_op("nullOf", _to_java_data_type(data_type))
def log(v, base=None) -> Expression[float]:
"""
If base is specified, calculates the logarithm of the given value to the given base.
Otherwise, calculates the natural logarithm of the given value.
"""
if base is None:
return _unary_op("log", v)
else:
return _binary_op("log", base, v)
def source_watermark() -> Expression:
"""
Source watermark declaration for schema.
    This is a marker function that doesn't have a concrete runtime implementation. It can only
be used as a single expression for watermark strategies in schema declarations. The declaration
will be pushed down into a table source that implements the `SupportsSourceWatermark`
interface. The source will emit system-defined watermarks afterwards.
Please check the documentation whether the connector supports source watermarks.
"""
return _leaf_op("sourceWatermark")
def if_then_else(condition: Union[bool, Expression[bool]], if_true, if_false) -> Expression:
"""
Ternary conditional operator that decides which of two other expressions should be evaluated
    based on an evaluated boolean condition.
e.g. if_then_else(col("f0") > 5, "A", "B") leads to "A"
:param condition: condition boolean condition
:param if_true: expression to be evaluated if condition holds
:param if_false: expression to be evaluated if condition does not hold
"""
return _ternary_op("ifThenElse", condition, if_true, if_false)
def coalesce(*args) -> Expression:
"""
Returns the first argument that is not NULL.
If all arguments are NULL, it returns NULL as well.
The return type is the least restrictive, common type of all of its arguments.
The return type is nullable if all arguments are nullable as well.
Examples:
::
>>> coalesce(None, "default") # Returns "default"
>>> # Returns the first non-null value among f0 and f1,
>>> # or "default" if f0 and f1 are both null
>>> coalesce(col("f0"), col("f1"), "default")
:param args: the input expressions.
"""
gateway = get_gateway()
args = to_jarray(gateway.jvm.Object, [_get_java_expression(arg) for arg in args])
return _unary_op("coalesce", args)
def with_columns(head, *tails) -> Expression:
"""
Creates an expression that selects a range of columns. It can be used wherever an array of
expression is accepted such as function calls, projections, or groupings.
A range can either be index-based or name-based. Indices start at 1 and boundaries are
inclusive.
e.g. with_columns(range_("b", "c")) or with_columns(col("*"))
.. seealso:: :func:`~pyflink.table.expressions.range_`,
:func:`~pyflink.table.expressions.without_columns`
"""
gateway = get_gateway()
tails = to_jarray(gateway.jvm.Object, [_get_java_expression(t) for t in tails])
return _binary_op("withColumns", head, tails)
def without_columns(head, *tails) -> Expression:
"""
Creates an expression that selects all columns except for the given range of columns. It can
be used wherever an array of expression is accepted such as function calls, projections, or
groupings.
A range can either be index-based or name-based. Indices start at 1 and boundaries are
inclusive.
e.g. without_columns(range_("b", "c")) or without_columns(col("c"))
.. seealso:: :func:`~pyflink.table.expressions.range_`,
:func:`~pyflink.table.expressions.with_columns`
"""
gateway = get_gateway()
tails = to_jarray(gateway.jvm.Object, [_get_java_expression(t) for t in tails])
return _binary_op("withoutColumns", head, tails)
def json_string(value) -> Expression:
"""
Serializes a value into JSON.
This function returns a JSON string containing the serialized value. If the value is `NULL`,
the function returns `NULL`.
Examples:
::
>>> json_string(null_of(DataTypes.INT())) # None
>>> json_string(1) # '1'
>>> json_string(True) # 'true'
>>> json_string("Hello, World!") # '"Hello, World!"'
>>> json_string([1, 2]) # '[1,2]'
"""
return _unary_op("jsonString", value)
def json_object(on_null: JsonOnNull = JsonOnNull.NULL, *args) -> Expression:
"""
Builds a JSON object string from a list of key-value pairs.
`args` is an even-numbered list of alternating key/value pairs. Note that keys must be
non-`NULL` string literals, while values may be arbitrary expressions.
This function returns a JSON string. The `on_null` behavior defines how to treat `NULL` values.
Values which are created from another JSON construction function call (`json_object`,
`json_array`) are inserted directly rather than as a string. This allows building nested JSON
structures.
Examples:
::
>>> json_object() # '{}'
>>> json_object(JsonOnNull.NULL, "K1", "V1", "K2", "V2") # '{"K1":"V1","K2":"V2"}'
>>> # Expressions as values
>>> json_object(JsonOnNull.NULL, "orderNo", col("orderId"))
>>> json_object(JsonOnNull.NULL, "K1", null_of(DataTypes.STRING())) # '{"K1":null}'
>>> json_object(JsonOnNull.ABSENT, "K1", null_of(DataTypes.STRING())) # '{}'
>>> # '{"K1":{"K2":"V"}}'
>>> json_object(JsonOnNull.NULL, "K1", json_object(JsonOnNull.NULL, "K2", "V"))
.. seealso:: :func:`~pyflink.table.expressions.json_array`
"""
return _varargs_op("jsonObject", *(on_null._to_j_json_on_null(), *args))
def json_object_agg(on_null: JsonOnNull,
key_expr: Union[str, Expression[str]],
value_expr) -> Expression:
"""
Builds a JSON object string by aggregating key-value expressions into a single JSON object.
The key expression must return a non-nullable character string. Value expressions can be
arbitrary, including other JSON functions. If a value is `NULL`, the `on_null` behavior defines
what to do.
Note that keys must be unique. If a key occurs multiple times, an error will be thrown.
This function is currently not supported in `OVER` windows.
Examples:
::
>>> # '{"Apple":2,"Banana":17,"Orange":0}'
>>> orders.select(json_object_agg(JsonOnNull.NULL, col("product"), col("cnt")))
"""
return _ternary_op("jsonObjectAgg", on_null._to_j_json_on_null(), key_expr, value_expr)
def json_array(on_null: JsonOnNull = JsonOnNull.ABSENT, *args) -> Expression:
"""
Builds a JSON array string from a list of values.
This function returns a JSON string. The values can be arbitrary expressions. The `on_null`
behavior defines how to treat `NULL` values.
Elements which are created from another JSON construction function call (`json_object`,
`json_array`) are inserted directly rather than as a string. This allows building nested JSON
structures.
Examples:
::
>>> json_array() # '[]'
>>> json_array(JsonOnNull.NULL, 1, "2") # '[1,"2"]'
>>> # Expressions as values
>>> json_array(JsonOnNull.NULL, col("orderId"))
>>> json_array(JsonOnNull.NULL, null_of(DataTypes.STRING())) # '[null]'
>>> json_array(JsonOnNull.ABSENT, null_of(DataTypes.STRING())) # '[]'
>>> json_array(JsonOnNull.NULL, json_array(JsonOnNull.NULL, 1)) # '[[1]]'
.. seealso:: :func:`~pyflink.table.expressions.json_object`
"""
return _varargs_op("jsonArray", *(on_null._to_j_json_on_null(), *args))
def json_array_agg(on_null: JsonOnNull, item_expr) -> Expression:
"""
Builds a JSON object string by aggregating items into an array.
Item expressions can be arbitrary, including other JSON functions. If a value is `NULL`, the
`on_null` behavior defines what to do.
This function is currently not supported in `OVER` windows, unbounded session windows, or hop
windows.
Examples:
::
>>> # '["Apple","Banana","Orange"]'
>>> orders.select(json_array_agg(JsonOnNull.NULL, col("product")))
"""
return _binary_op("jsonArrayAgg", on_null._to_j_json_on_null(), item_expr)
def call(f: Union[str, UserDefinedFunctionWrapper], *args) -> Expression:
"""
The first parameter `f` could be a str or a Python user-defined function.
When it is str, this is a call to a function that will be looked up in a catalog. There
are two kinds of functions:
- System functions - which are identified with one part names
- Catalog functions - which are identified always with three parts names
(catalog, database, function)
Moreover each function can either be a temporary function or permanent one
(which is stored in an external catalog).
Based on that two properties the resolution order for looking up a function based on
the provided `function_name` is following:
- Temporary system function
- System function
- Temporary catalog function
- Catalog function
:param f: the path of the function or the Python user-defined function.
:param args: parameters of the user-defined function.
"""
gateway = get_gateway()
if isinstance(f, str):
return Expression(gateway.jvm.Expressions.call(
f, to_jarray(gateway.jvm.Object, [_get_java_expression(arg) for arg in args])))
expressions_clz = load_java_class("org.apache.flink.table.api.Expressions")
function_definition_clz = load_java_class('org.apache.flink.table.functions.FunctionDefinition')
j_object_array_type = to_jarray(gateway.jvm.Object, []).getClass()
api_call_method = expressions_clz.getDeclaredMethod(
"apiCall",
to_jarray(gateway.jvm.Class, [function_definition_clz, j_object_array_type]))
api_call_method.setAccessible(True)
return Expression(api_call_method.invoke(
None,
to_jarray(gateway.jvm.Object,
[f._java_user_defined_function(),
to_jarray(gateway.jvm.Object, [_get_java_expression(arg) for arg in args])])))
def call_sql(sql_expression: str) -> Expression:
"""
A call to a SQL expression.
The given string is parsed and translated into a Table API expression during planning. Only
the translated expression is evaluated during runtime.
Note: Currently, calls are limited to simple scalar expressions. Calls to aggregate or
table-valued functions are not supported. Sub-queries are also not allowed.
:param sql_expression: SQL expression to be translated
"""
return _unary_op("callSql", sql_expression)
_add_version_doc()
| 31,176 | 33.073224 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
Entry point classes of Flink Table API:
- :class:`TableEnvironment` and :class:`StreamTableEnvironment`
Main entry point for Flink Table API & SQL functionality. :class:`TableEnvironment` is used
      in pure Table API & SQL jobs, while :class:`StreamTableEnvironment` needs to be used when
      mixing the Table API with the DataStream API.
- :class:`Table`
The core component of the Table API. Use the methods of :class:`Table` to transform data.
- :class:`StatementSet`
The core component of the Table API. It's used to create jobs with multiple sinks.
- :class:`EnvironmentSettings`
Defines all the parameters used to initialize a :class:`TableEnvironment`.
- :class:`TableConfig`
A config to define the runtime behavior of the Table API.
It is used together with :class:`pyflink.datastream.StreamExecutionEnvironment` to create
:class:`StreamTableEnvironment`.
Classes to define user-defined functions:
- :class:`ScalarFunction`
Base interface for user-defined scalar function.
- :class:`TableFunction`
Base interface for user-defined table function.
- :class:`AggregateFunction`
Base interface for user-defined aggregate function.
- :class:`TableAggregateFunction`
Base interface for user-defined table aggregate function.
- :class:`FunctionContext`
Used to obtain global runtime information about the context in which the
user-defined function is executed, such as the metric group, and global job parameters, etc.
Classes to define window:
- :class:`window.GroupWindow`
Group windows group rows based on time or row-count intervals. See :class:`window.Tumble`,
:class:`window.Session` and :class:`window.Slide` for more details on how to create a tumble
window, session window, hop window separately.
- :class:`window.OverWindow`
Over window aggregates compute an aggregate for each input row over a range
of its neighboring rows. See :class:`window.Over` for more details on how to create an over
window.
Classes for catalog:
- :class:`catalog.Catalog`
Responsible for reading and writing metadata such as database/table/views/UDFs
from and to a catalog.
- :class:`catalog.HiveCatalog`
Responsible for reading and writing metadata stored in Hive.
Classes to define source & sink:
- :class:`TableDescriptor`
TableDescriptor is a template for creating a CatalogTable instance. It closely resembles the
"CREATE TABLE" SQL DDL statement, containing schema, connector options, and other
characteristics. Since tables in Flink are typically backed by external systems, the
      descriptor describes how a connector (and possibly its format) is configured.
- :class:`FormatDescriptor`
Describes a format and its options for use with :class:`TableDescriptor`.
- :class:`Schema`
Describes the schema for use with :class:`TableDescriptor`. It represents the schema part of a
`CREATE TABLE (schema) WITH (options)` DDL statement in SQL. It defines columns of
different kind, constraints, time attributes, and watermark strategies. It is possible to
reference objects (such as functions or types) across different catalogs.
Classes for module:
- :class:`Module`
      Defines a set of metadata, including functions, user-defined types, operators, rules,
      etc. Metadata from modules is regarded as built-in or system metadata that users can take
      advantage of.
- :class:`module.HiveModule`
Implementation of :class:`Module` to provide Hive built-in metadata.
Other important classes:
- :class:`DataTypes`
Defines a list of data types available in Table API.
- :class:`Expression`
Represents a logical tree for producing a computation result for a column in a :class:`Table`.
Might be literal values, function calls, or field references.
- :class:`TableSchema`
Represents a table's structure with field names and data types.
- :class:`SqlDialect`
Enumeration of valid SQL compatibility modes.
- :class:`ChangelogMode`
The set of changes contained in a changelog.
- :class:`ExplainDetail`
Defines the types of details for explain result.
"""
from __future__ import absolute_import
from pyflink.table.changelog_mode import ChangelogMode
from pyflink.table.data_view import DataView, ListView, MapView
from pyflink.table.environment_settings import EnvironmentSettings
from pyflink.table.explain_detail import ExplainDetail
from pyflink.table.expression import Expression
from pyflink.table.module import Module, ModuleEntry
from pyflink.table.result_kind import ResultKind
from pyflink.table.schema import Schema
from pyflink.table.sinks import CsvTableSink, TableSink, WriteMode
from pyflink.table.sources import CsvTableSource, TableSource
from pyflink.table.sql_dialect import SqlDialect
from pyflink.table.statement_set import StatementSet
from pyflink.table.table import GroupWindowedTable, GroupedTable, OverWindowedTable, Table, \
WindowGroupedTable
from pyflink.table.table_config import TableConfig
from pyflink.table.table_descriptor import TableDescriptor, FormatDescriptor
from pyflink.table.table_environment import (TableEnvironment, StreamTableEnvironment)
from pyflink.table.table_result import TableResult
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import DataTypes, UserDefinedType, Row, RowKind
from pyflink.table.udf import FunctionContext, ScalarFunction, TableFunction, AggregateFunction, \
TableAggregateFunction
__all__ = [
'TableEnvironment',
'StreamTableEnvironment',
'Table',
'StatementSet',
'EnvironmentSettings',
'TableConfig',
'GroupedTable',
'GroupWindowedTable',
'OverWindowedTable',
'WindowGroupedTable',
'ScalarFunction',
'TableFunction',
'AggregateFunction',
'TableAggregateFunction',
'FunctionContext',
'DataView',
'ListView',
'MapView',
'TableDescriptor',
'FormatDescriptor',
'Schema',
'Module',
'ModuleEntry',
'SqlDialect',
'DataTypes',
'UserDefinedType',
'Expression',
'TableSchema',
'TableResult',
'Row',
'RowKind',
'ChangelogMode',
'ExplainDetail',
'TableSource',
'TableSink',
'CsvTableSource',
'CsvTableSink',
'WriteMode',
'ResultKind'
]
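# Hedged usage sketch (not part of the original package code): shows the typical
# entry-point wiring of EnvironmentSettings and TableEnvironment. The element data
# and column names are illustrative assumptions.
def _table_api_entry_point_example():
    from pyflink.table.expressions import col
    env_settings = EnvironmentSettings.in_streaming_mode()
    t_env = TableEnvironment.create(env_settings)
    # Build an in-memory table from Python objects and apply a simple projection.
    table = t_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['id', 'data'])
    return table.select(col('id') + 1, col('data'))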
| 7,396 | 40.79096 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/sql_dialect.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
__all__ = ['SqlDialect']
class SqlDialect(object):
"""
Enumeration of valid SQL compatibility modes.
    In most cases, the built-in compatibility mode should be sufficient. For some features,
    e.g. the "INSERT INTO T PARTITION(a='xxx') ..." grammar, you may need to switch to the
    Hive dialect.
We may introduce other SQL dialects in the future.
:data:`DEFAULT`:
Flink's default SQL behavior.
:data:`HIVE`:
SQL dialect that allows some Apache Hive specific grammar.
Note: We might never support all of the Hive grammar. See the documentation for
supported features.
"""
DEFAULT = 0
HIVE = 1
@staticmethod
def _from_j_sql_dialect(j_sql_dialect):
gateway = get_gateway()
JSqlDialect = gateway.jvm.org.apache.flink.table.api.SqlDialect
if j_sql_dialect == JSqlDialect.DEFAULT:
return SqlDialect.DEFAULT
elif j_sql_dialect == JSqlDialect.HIVE:
return SqlDialect.HIVE
else:
raise Exception("Unsupported Java SQL dialect: %s" % j_sql_dialect)
@staticmethod
def _to_j_sql_dialect(sql_dialect):
gateway = get_gateway()
JSqlDialect = gateway.jvm.org.apache.flink.table.api.SqlDialect
if sql_dialect == SqlDialect.DEFAULT:
return JSqlDialect.DEFAULT
elif sql_dialect == SqlDialect.HIVE:
return JSqlDialect.HIVE
else:
raise TypeError("Unsupported SQL dialect: %s, supported SQL dialects are: "
"SqlDialect.DEFAULT, SqlDialect.HIVE." % sql_dialect)
| 2,623 | 36.485714 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/catalog.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Dict, List, Optional
from py4j.java_gateway import java_import
from pyflink.java_gateway import get_gateway
from pyflink.table.schema import Schema
from pyflink.table.table_schema import TableSchema
__all__ = ['Catalog', 'CatalogDatabase', 'CatalogBaseTable', 'CatalogPartition', 'CatalogFunction',
'Procedure', 'ObjectPath', 'CatalogPartitionSpec', 'CatalogTableStatistics',
'CatalogColumnStatistics', 'HiveCatalog']
class Catalog(object):
"""
Catalog is responsible for reading and writing metadata such as database/table/views/UDFs
from a registered catalog. It connects a registered catalog and Flink's Table API.
"""
def __init__(self, j_catalog):
self._j_catalog = j_catalog
def get_default_database(self) -> str:
"""
Get the name of the default database for this catalog. The default database will be the
        current database for the catalog when the user's session doesn't specify a current database.
        The value typically comes from configuration and will not change for the lifetime of the
        catalog instance.
        :return: The name of the default database.
:raise: CatalogException in case of any runtime exception.
"""
return self._j_catalog.getDefaultDatabase()
def list_databases(self) -> List[str]:
"""
Get the names of all databases in this catalog.
:return: A list of the names of all databases.
:raise: CatalogException in case of any runtime exception.
"""
return list(self._j_catalog.listDatabases())
def get_database(self, database_name: str) -> 'CatalogDatabase':
"""
Get a database from this catalog.
:param database_name: Name of the database.
:return: The requested database :class:`CatalogDatabase`.
:raise: CatalogException in case of any runtime exception.
DatabaseNotExistException if the database does not exist.
"""
return CatalogDatabase._get(self._j_catalog.getDatabase(database_name))
def database_exists(self, database_name: str) -> bool:
"""
Check if a database exists in this catalog.
:param database_name: Name of the database.
        :return: true if the given database exists in the catalog, false otherwise.
:raise: CatalogException in case of any runtime exception.
"""
return self._j_catalog.databaseExists(database_name)
def create_database(self, name: str, database: 'CatalogDatabase', ignore_if_exists: bool):
"""
Create a database.
:param name: Name of the database to be created.
:param database: The :class:`CatalogDatabase` database definition.
:param ignore_if_exists: Flag to specify behavior when a database with the given name
already exists:
if set to false, throw a DatabaseAlreadyExistException,
if set to true, do nothing.
:raise: CatalogException in case of any runtime exception.
DatabaseAlreadyExistException if the given database already exists and
ignoreIfExists is false.
"""
self._j_catalog.createDatabase(name, database._j_catalog_database, ignore_if_exists)
def drop_database(self, name: str, ignore_if_exists: bool):
"""
Drop a database.
:param name: Name of the database to be dropped.
:param ignore_if_exists: Flag to specify behavior when the database does not exist:
if set to false, throw an exception,
if set to true, do nothing.
:raise: CatalogException in case of any runtime exception.
DatabaseNotExistException if the given database does not exist.
"""
self._j_catalog.dropDatabase(name, ignore_if_exists)
def alter_database(self, name: str, new_database: 'CatalogDatabase',
ignore_if_not_exists: bool):
"""
Modify an existing database.
:param name: Name of the database to be modified.
:param new_database: The new database :class:`CatalogDatabase` definition.
:param ignore_if_not_exists: Flag to specify behavior when the given database does not
exist:
if set to false, throw an exception,
if set to true, do nothing.
:raise: CatalogException in case of any runtime exception.
DatabaseNotExistException if the given database does not exist.
"""
self._j_catalog.alterDatabase(name, new_database._j_catalog_database, ignore_if_not_exists)
def list_tables(self, database_name: str) -> List[str]:
"""
Get names of all tables and views under this database. An empty list is returned if none
exists.
:param database_name: Name of the given database.
:return: A list of the names of all tables and views in this database.
:raise: CatalogException in case of any runtime exception.
DatabaseNotExistException if the database does not exist.
"""
return list(self._j_catalog.listTables(database_name))
def list_views(self, database_name: str) -> List[str]:
"""
Get names of all views under this database. An empty list is returned if none exists.
:param database_name: Name of the given database.
:return: A list of the names of all views in the given database.
:raise: CatalogException in case of any runtime exception.
DatabaseNotExistException if the database does not exist.
"""
return list(self._j_catalog.listViews(database_name))
def get_table(self, table_path: 'ObjectPath') -> 'CatalogBaseTable':
"""
Get a CatalogTable or CatalogView identified by tablePath.
:param table_path: Path :class:`ObjectPath` of the table or view.
:return: The requested table or view :class:`CatalogBaseTable`.
:raise: CatalogException in case of any runtime exception.
TableNotExistException if the target does not exist.
"""
return CatalogBaseTable._get(self._j_catalog.getTable(table_path._j_object_path))
def table_exists(self, table_path: 'ObjectPath') -> bool:
"""
Check if a table or view exists in this catalog.
:param table_path: Path :class:`ObjectPath` of the table or view.
        :return: true if the given table exists in the catalog, false otherwise.
:raise: CatalogException in case of any runtime exception.
"""
return self._j_catalog.tableExists(table_path._j_object_path)
def drop_table(self, table_path: 'ObjectPath', ignore_if_not_exists: bool):
"""
Drop a table or view.
:param table_path: Path :class:`ObjectPath` of the table or view to be dropped.
:param ignore_if_not_exists: Flag to specify behavior when the table or view does not exist:
if set to false, throw an exception,
if set to true, do nothing.
:raise: CatalogException in case of any runtime exception.
TableNotExistException if the table or view does not exist.
"""
self._j_catalog.dropTable(table_path._j_object_path, ignore_if_not_exists)
def rename_table(self, table_path: 'ObjectPath', new_table_name: str,
ignore_if_not_exists: bool):
"""
Rename an existing table or view.
:param table_path: Path :class:`ObjectPath` of the table or view to be renamed.
:param new_table_name: The new name of the table or view.
:param ignore_if_not_exists: Flag to specify behavior when the table or view does not exist:
if set to false, throw an exception,
if set to true, do nothing.
:raise: CatalogException in case of any runtime exception.
TableNotExistException if the table does not exist.
"""
self._j_catalog.renameTable(table_path._j_object_path, new_table_name, ignore_if_not_exists)
def create_table(self, table_path: 'ObjectPath', table: 'CatalogBaseTable',
ignore_if_exists: bool):
"""
Create a new table or view.
:param table_path: Path :class:`ObjectPath` of the table or view to be created.
:param table: The table definition :class:`CatalogBaseTable`.
:param ignore_if_exists: Flag to specify behavior when a table or view already exists at
the given path:
if set to false, it throws a TableAlreadyExistException,
if set to true, do nothing.
:raise: CatalogException in case of any runtime exception.
DatabaseNotExistException if the database in tablePath doesn't exist.
TableAlreadyExistException if table already exists and ignoreIfExists is false.
"""
self._j_catalog.createTable(table_path._j_object_path, table._j_catalog_base_table,
ignore_if_exists)
def alter_table(self, table_path: 'ObjectPath', new_table: 'CatalogBaseTable',
ignore_if_not_exists):
"""
Modify an existing table or view.
Note that the new and old CatalogBaseTable must be of the same type. For example,
        this doesn't allow altering a regular table to a partitioned table, or altering a view to
        a table, and vice versa.
:param table_path: Path :class:`ObjectPath` of the table or view to be modified.
:param new_table: The new table definition :class:`CatalogBaseTable`.
:param ignore_if_not_exists: Flag to specify behavior when the table or view does not exist:
if set to false, throw an exception,
if set to true, do nothing.
:raise: CatalogException in case of any runtime exception.
TableNotExistException if the table does not exist.
"""
self._j_catalog.alterTable(table_path._j_object_path, new_table._j_catalog_base_table,
ignore_if_not_exists)
def list_partitions(self,
table_path: 'ObjectPath',
partition_spec: 'CatalogPartitionSpec' = None)\
-> List['CatalogPartitionSpec']:
"""
Get CatalogPartitionSpec of all partitions of the table.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: The partition spec :class:`CatalogPartitionSpec` to list.
:return: A list of :class:`CatalogPartitionSpec` of the table.
:raise: CatalogException in case of any runtime exception.
TableNotExistException thrown if the table does not exist in the catalog.
TableNotPartitionedException thrown if the table is not partitioned.
"""
if partition_spec is None:
return [CatalogPartitionSpec(p) for p in self._j_catalog.listPartitions(
table_path._j_object_path)]
else:
return [CatalogPartitionSpec(p) for p in self._j_catalog.listPartitions(
table_path._j_object_path, partition_spec._j_catalog_partition_spec)]
def get_partition(self, table_path: 'ObjectPath', partition_spec: 'CatalogPartitionSpec') \
-> 'CatalogPartition':
"""
Get a partition of the given table.
The given partition spec keys and values need to be matched exactly for a result.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: The partition spec :class:`CatalogPartitionSpec` of partition to get.
:return: The requested partition :class:`CatalogPartition`.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException thrown if the partition doesn't exist.
"""
return CatalogPartition._get(self._j_catalog.getPartition(
table_path._j_object_path, partition_spec._j_catalog_partition_spec))
def partition_exists(self, table_path: 'ObjectPath',
partition_spec: 'CatalogPartitionSpec') -> bool:
"""
Check whether a partition exists or not.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: Partition spec :class:`CatalogPartitionSpec` of the partition to
check.
:return: true if the partition exists.
:raise: CatalogException in case of any runtime exception.
"""
return self._j_catalog.partitionExists(
table_path._j_object_path, partition_spec._j_catalog_partition_spec)
def create_partition(self, table_path: 'ObjectPath', partition_spec: 'CatalogPartitionSpec',
partition: 'CatalogPartition', ignore_if_exists: bool):
"""
Create a partition.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: Partition spec :class:`CatalogPartitionSpec` of the partition.
:param partition: The partition :class:`CatalogPartition` to add.
        :param ignore_if_exists: Flag to specify behavior if a partition with the given
                                 partition spec already exists:
                                 if set to false, it throws a PartitionAlreadyExistsException,
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
TableNotExistException thrown if the target table does not exist.
TableNotPartitionedException thrown if the target table is not partitioned.
PartitionSpecInvalidException thrown if the given partition spec is invalid.
PartitionAlreadyExistsException thrown if the target partition already exists.
"""
self._j_catalog.createPartition(table_path._j_object_path,
partition_spec._j_catalog_partition_spec,
partition._j_catalog_partition,
ignore_if_exists)
def drop_partition(self, table_path: 'ObjectPath', partition_spec: 'CatalogPartitionSpec',
ignore_if_not_exists: bool):
"""
Drop a partition.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: Partition spec :class:`CatalogPartitionSpec` of the partition to
drop.
:param ignore_if_not_exists: Flag to specify behavior if the database does not exist:
if set to false, throw an exception,
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException thrown if the target partition does not exist.
"""
self._j_catalog.dropPartition(table_path._j_object_path,
partition_spec._j_catalog_partition_spec,
ignore_if_not_exists)
def alter_partition(self, table_path: 'ObjectPath', partition_spec: 'CatalogPartitionSpec',
new_partition: 'CatalogPartition', ignore_if_not_exists: bool):
"""
Alter a partition.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: Partition spec :class:`CatalogPartitionSpec` of the partition to
alter.
:param new_partition: New partition :class:`CatalogPartition` to replace the old one.
:param ignore_if_not_exists: Flag to specify behavior if the database does not exist:
if set to false, throw an exception,
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException thrown if the target partition does not exist.
"""
self._j_catalog.alterPartition(table_path._j_object_path,
partition_spec._j_catalog_partition_spec,
new_partition._j_catalog_partition,
ignore_if_not_exists)
def list_functions(self, database_name: str) -> List[str]:
"""
List the names of all functions in the given database. An empty list is returned if none is
registered.
:param database_name: Name of the database.
:return: A list of the names of the functions in this database.
:raise: CatalogException in case of any runtime exception.
DatabaseNotExistException if the database does not exist.
"""
return list(self._j_catalog.listFunctions(database_name))
def list_procedures(self, database_name: str) -> List[str]:
"""
List the names of all procedures in the given database. An empty list is returned if none is
registered.
:param database_name: Name of the database.
:return: A list of the names of the procedures in this database.
:raise: CatalogException in case of any runtime exception.
DatabaseNotExistException if the database does not exist.
"""
return list(self._j_catalog.listProcedures(database_name))
def get_function(self, function_path: 'ObjectPath') -> 'CatalogFunction':
"""
Get the function.
:param function_path: Path :class:`ObjectPath` of the function.
:return: The requested function :class:`CatalogFunction`.
:raise: CatalogException in case of any runtime exception.
FunctionNotExistException if the function does not exist in the catalog.
"""
return CatalogFunction._get(self._j_catalog.getFunction(function_path._j_object_path))
def get_procedure(self, procedure_path: 'ObjectPath') -> 'Procedure':
"""
Get the procedure.
:param procedure_path: Path :class:`ObjectPath` of the procedure.
:return: The requested procedure :class:`Procedure`.
:raise: CatalogException in case of any runtime exception.
ProcedureNotExistException if the procedure does not exist in the catalog.
"""
return Procedure._get(self._j_catalog.getProcedure(procedure_path._j_object_path))
def function_exists(self, function_path: 'ObjectPath') -> bool:
"""
Check whether a function exists or not.
:param function_path: Path :class:`ObjectPath` of the function.
        :return: true if the function exists in the catalog, false otherwise.
:raise: CatalogException in case of any runtime exception.
"""
return self._j_catalog.functionExists(function_path._j_object_path)
def create_function(self, function_path: 'ObjectPath', function: 'CatalogFunction',
ignore_if_exists: bool):
"""
Create a function.
:param function_path: Path :class:`ObjectPath` of the function.
:param function: The function :class:`CatalogFunction` to be created.
:param ignore_if_exists: Flag to specify behavior if a function with the given name
already exists:
if set to false, it throws a FunctionAlreadyExistException,
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
FunctionAlreadyExistException if the function already exist.
DatabaseNotExistException if the given database does not exist.
"""
self._j_catalog.createFunction(function_path._j_object_path,
function._j_catalog_function,
ignore_if_exists)
def alter_function(self, function_path: 'ObjectPath', new_function: 'CatalogFunction',
ignore_if_not_exists: bool):
"""
Modify an existing function.
:param function_path: Path :class:`ObjectPath` of the function.
:param new_function: The function :class:`CatalogFunction` to be modified.
:param ignore_if_not_exists: Flag to specify behavior if the function does not exist:
if set to false, throw an exception
if set to true, nothing happens
:raise: CatalogException in case of any runtime exception.
FunctionNotExistException if the function does not exist.
"""
self._j_catalog.alterFunction(function_path._j_object_path,
new_function._j_catalog_function,
ignore_if_not_exists)
def drop_function(self, function_path: 'ObjectPath', ignore_if_not_exists: bool):
"""
Drop a function.
:param function_path: Path :class:`ObjectPath` of the function to be dropped.
:param ignore_if_not_exists: Flag to specify behavior if the function does not exist:
if set to false, throw an exception
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
FunctionNotExistException if the function does not exist.
"""
self._j_catalog.dropFunction(function_path._j_object_path, ignore_if_not_exists)
def get_table_statistics(self, table_path: 'ObjectPath') -> 'CatalogTableStatistics':
"""
Get the statistics of a table.
:param table_path: Path :class:`ObjectPath` of the table.
:return: The statistics :class:`CatalogTableStatistics` of the given table.
:raise: CatalogException in case of any runtime exception.
TableNotExistException if the table does not exist in the catalog.
"""
return CatalogTableStatistics(
j_catalog_table_statistics=self._j_catalog.getTableStatistics(
table_path._j_object_path))
def get_table_column_statistics(self, table_path: 'ObjectPath') -> 'CatalogColumnStatistics':
"""
Get the column statistics of a table.
:param table_path: Path :class:`ObjectPath` of the table.
:return: The column statistics :class:`CatalogColumnStatistics` of the given table.
:raise: CatalogException in case of any runtime exception.
TableNotExistException if the table does not exist in the catalog.
"""
return CatalogColumnStatistics(
j_catalog_column_statistics=self._j_catalog.getTableColumnStatistics(
table_path._j_object_path))
def get_partition_statistics(self,
table_path: 'ObjectPath',
partition_spec: 'CatalogPartitionSpec') \
-> 'CatalogTableStatistics':
"""
Get the statistics of a partition.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: Partition spec :class:`CatalogPartitionSpec` of the partition.
:return: The statistics :class:`CatalogTableStatistics` of the given partition.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException if the partition does not exist.
"""
return CatalogTableStatistics(
j_catalog_table_statistics=self._j_catalog.getPartitionStatistics(
table_path._j_object_path, partition_spec._j_catalog_partition_spec))
def bulk_get_partition_statistics(self,
table_path: 'ObjectPath',
partition_specs: List['CatalogPartitionSpec']) \
-> List['CatalogTableStatistics']:
"""
Get a list of statistics of given partitions.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_specs: The list of :class:`CatalogPartitionSpec` of the given partitions.
:return: The statistics list of :class:`CatalogTableStatistics` of the given partitions.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException if the partition does not exist.
"""
return [CatalogTableStatistics(j_catalog_table_statistics=p)
for p in self._j_catalog.bulkGetPartitionStatistics(table_path._j_object_path,
partition_specs)]
def get_partition_column_statistics(self,
table_path: 'ObjectPath',
partition_spec: 'CatalogPartitionSpec') \
-> 'CatalogColumnStatistics':
"""
Get the column statistics of a partition.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: Partition spec :class:`CatalogPartitionSpec` of the partition.
:return: The column statistics :class:`CatalogColumnStatistics` of the given partition.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException if the partition does not exist.
"""
return CatalogColumnStatistics(
j_catalog_column_statistics=self._j_catalog.getPartitionColumnStatistics(
table_path._j_object_path, partition_spec._j_catalog_partition_spec))
def bulk_get_partition_column_statistics(self,
table_path: 'ObjectPath',
partition_specs: List['CatalogPartitionSpec']) \
-> List['CatalogColumnStatistics']:
"""
Get a list of the column statistics for the given partitions.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_specs: The list of :class:`CatalogPartitionSpec` of the given partitions.
        :return: The statistics list of :class:`CatalogColumnStatistics` of the given partitions.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException if the partition does not exist.
"""
return [CatalogColumnStatistics(j_catalog_column_statistics=p)
                for p in self._j_catalog.bulkGetPartitionColumnStatistics(
table_path._j_object_path, partition_specs)]
def alter_table_statistics(self,
table_path: 'ObjectPath',
table_statistics: 'CatalogTableStatistics',
ignore_if_not_exists: bool):
"""
Update the statistics of a table.
:param table_path: Path :class:`ObjectPath` of the table.
:param table_statistics: New statistics :class:`CatalogTableStatistics` to update.
:param ignore_if_not_exists: Flag to specify behavior if the table does not exist:
if set to false, throw an exception,
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
TableNotExistException if the table does not exist in the catalog.
"""
self._j_catalog.alterTableStatistics(
table_path._j_object_path,
table_statistics._j_catalog_table_statistics,
ignore_if_not_exists)
def alter_table_column_statistics(self,
table_path: 'ObjectPath',
column_statistics: 'CatalogColumnStatistics',
ignore_if_not_exists: bool):
"""
Update the column statistics of a table.
:param table_path: Path :class:`ObjectPath` of the table.
:param column_statistics: New column statistics :class:`CatalogColumnStatistics` to update.
:param ignore_if_not_exists: Flag to specify behavior if the column does not exist:
if set to false, throw an exception,
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
TableNotExistException if the table does not exist in the catalog.
"""
self._j_catalog.alterTableColumnStatistics(
table_path._j_object_path,
column_statistics._j_catalog_column_statistics,
ignore_if_not_exists)
def alter_partition_statistics(self,
table_path: 'ObjectPath',
partition_spec: 'CatalogPartitionSpec',
partition_statistics: 'CatalogTableStatistics',
ignore_if_not_exists: bool):
"""
Update the statistics of a table partition.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: Partition spec :class:`CatalogPartitionSpec` of the partition.
:param partition_statistics: New statistics :class:`CatalogTableStatistics` to update.
:param ignore_if_not_exists: Flag to specify behavior if the partition does not exist:
if set to false, throw an exception,
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException if the partition does not exist.
"""
self._j_catalog.alterPartitionStatistics(
table_path._j_object_path,
partition_spec._j_catalog_partition_spec,
partition_statistics._j_catalog_table_statistics,
ignore_if_not_exists)
def alter_partition_column_statistics(self,
table_path: 'ObjectPath',
partition_spec: 'CatalogPartitionSpec',
column_statistics: 'CatalogColumnStatistics',
ignore_if_not_exists: bool):
"""
Update the column statistics of a table partition.
:param table_path: Path :class:`ObjectPath` of the table.
:param partition_spec: Partition spec :class:`CatalogPartitionSpec` of the partition.
:param column_statistics: New column statistics :class:`CatalogColumnStatistics` to update.
:param ignore_if_not_exists: Flag to specify behavior if the partition does not exist:
if set to false, throw an exception,
if set to true, nothing happens.
:raise: CatalogException in case of any runtime exception.
PartitionNotExistException if the partition does not exist.
"""
self._j_catalog.alterPartitionColumnStatistics(
table_path._j_object_path,
partition_spec._j_catalog_partition_spec,
column_statistics._j_catalog_column_statistics,
ignore_if_not_exists)
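# Hedged usage sketch (not part of the original module): walks the metadata of the
# catalog registered under "default_catalog". The catalog name and the "t_env"
# parameter (an existing TableEnvironment) are illustrative assumptions.
def _inspect_catalog_example(t_env):
    catalog = t_env.get_catalog("default_catalog")
    for database_name in catalog.list_databases():
        for table_name in catalog.list_tables(database_name):
            base_table = catalog.get_table(ObjectPath(database_name, table_name))
            print(database_name, table_name, base_table.get_comment())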
class CatalogDatabase(object):
"""
Represents a database object in a catalog.
"""
def __init__(self, j_catalog_database):
self._j_catalog_database = j_catalog_database
@staticmethod
def create_instance(
properties: Dict[str, str],
comment: str = None
) -> "CatalogDatabase":
"""
Creates an instance of CatalogDatabase.
:param properties: Property of the database
:param comment: Comment of the database
"""
assert properties is not None
gateway = get_gateway()
return CatalogDatabase(gateway.jvm.org.apache.flink.table.catalog.CatalogDatabaseImpl(
properties, comment))
@staticmethod
def _get(j_catalog_database):
return CatalogDatabase(j_catalog_database)
def get_properties(self) -> Dict[str, str]:
"""
Get a map of properties associated with the database.
"""
return dict(self._j_catalog_database.getProperties())
def get_comment(self) -> str:
"""
Get comment of the database.
:return: Comment of the database.
"""
return self._j_catalog_database.getComment()
def copy(self) -> 'CatalogDatabase':
"""
Get a deep copy of the CatalogDatabase instance.
:return: A copy of CatalogDatabase instance.
"""
return CatalogDatabase(self._j_catalog_database.copy())
def get_description(self) -> Optional[str]:
"""
Get a brief description of the database.
:return: An optional short description of the database.
"""
description = self._j_catalog_database.getDescription()
if description.isPresent():
return description.get()
else:
return None
def get_detailed_description(self) -> Optional[str]:
"""
Get a detailed description of the database.
:return: An optional long description of the database.
"""
detailed_description = self._j_catalog_database.getDetailedDescription()
if detailed_description.isPresent():
return detailed_description.get()
else:
return None
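# Hedged usage sketch (not part of the original module): defines a database and
# registers it in a catalog; "catalog" is an assumed Catalog instance and the
# property key/value pairs are illustrative.
def _create_database_example(catalog):
    database = CatalogDatabase.create_instance({"location": "/tmp/my_db"}, comment="demo database")
    catalog.create_database("my_db", database, ignore_if_exists=True)
    return catalog.get_database("my_db").get_comment()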
class CatalogBaseTable(object):
"""
CatalogBaseTable is the common parent of table and view. It has a map of
key-value pairs defining the properties of the table.
"""
def __init__(self, j_catalog_base_table):
self._j_catalog_base_table = j_catalog_base_table
@staticmethod
def create_table(
schema: TableSchema,
partition_keys: List[str] = [],
properties: Dict[str, str] = {},
comment: str = None
) -> "CatalogBaseTable":
"""
Create an instance of CatalogBaseTable for the catalog table.
:param schema: the table schema
:param partition_keys: the partition keys, default empty
:param properties: the properties of the catalog table
:param comment: the comment of the catalog table
"""
assert schema is not None
assert partition_keys is not None
assert properties is not None
gateway = get_gateway()
return CatalogBaseTable(
gateway.jvm.org.apache.flink.table.catalog.CatalogTableImpl(
schema._j_table_schema, partition_keys, properties, comment))
@staticmethod
def create_view(
original_query: str,
expanded_query: str,
schema: TableSchema,
properties: Dict[str, str],
comment: str = None
) -> "CatalogBaseTable":
"""
Create an instance of CatalogBaseTable for the catalog view.
:param original_query: the original text of the view definition
        :param expanded_query: the expanded text of the original view definition; this is needed
                               because context such as the current DB is lost once the session in
                               which the view was defined is gone. The expanded query text accounts
                               for this.
:param schema: the table schema
:param properties: the properties of the catalog view
:param comment: the comment of the catalog view
"""
assert original_query is not None
assert expanded_query is not None
assert schema is not None
assert properties is not None
gateway = get_gateway()
return CatalogBaseTable(
gateway.jvm.org.apache.flink.table.catalog.CatalogViewImpl(
original_query, expanded_query, schema._j_table_schema, properties, comment))
@staticmethod
def _get(j_catalog_base_table):
return CatalogBaseTable(j_catalog_base_table)
def get_options(self):
"""
Returns a map of string-based options.
In case of CatalogTable, these options may determine the kind of connector and its
configuration for accessing the data in the external system.
:return: Property map of the table/view.
.. versionadded:: 1.11.0
"""
return dict(self._j_catalog_base_table.getOptions())
def get_schema(self) -> TableSchema:
"""
Get the schema of the table.
:return: Schema of the table/view.
        .. note:: Deprecated in 1.14. This method returns the deprecated TableSchema class. The old
class was a hybrid of resolved and unresolved schema information. It has been replaced by
the new Schema which is always unresolved and will be resolved by the framework later.
"""
return TableSchema(j_table_schema=self._j_catalog_base_table.getSchema())
def get_unresolved_schema(self) -> Schema:
"""
Returns the schema of the table or view.
The schema can reference objects from other catalogs and will be resolved and validated by
the framework when accessing the table or view.
"""
return Schema(self._j_catalog_base_table.getUnresolvedSchema())
def get_comment(self) -> str:
"""
Get comment of the table or view.
:return: Comment of the table/view.
"""
return self._j_catalog_base_table.getComment()
def copy(self) -> 'CatalogBaseTable':
"""
Get a deep copy of the CatalogBaseTable instance.
        :return: A copy of the CatalogBaseTable instance.
"""
return CatalogBaseTable(self._j_catalog_base_table.copy())
def get_description(self) -> Optional[str]:
"""
Get a brief description of the table or view.
:return: An optional short description of the table/view.
"""
description = self._j_catalog_base_table.getDescription()
if description.isPresent():
return description.get()
else:
return None
def get_detailed_description(self) -> Optional[str]:
"""
Get a detailed description of the table or view.
:return: An optional long description of the table/view.
"""
detailed_description = self._j_catalog_base_table.getDetailedDescription()
if detailed_description.isPresent():
return detailed_description.get()
else:
return None
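# Hedged usage sketch (not part of the original module): builds a catalog table from
# a TableSchema and registers it under an ObjectPath; "catalog", the connector
# options and the object names are illustrative assumptions.
def _create_catalog_table_example(catalog):
    from pyflink.table.types import DataTypes
    schema = TableSchema(["id", "name"], [DataTypes.INT(), DataTypes.STRING()])
    table = CatalogBaseTable.create_table(
        schema=schema,
        properties={"connector": "filesystem", "path": "/tmp/data", "format": "csv"},
        comment="demo table")
    catalog.create_table(ObjectPath("my_db", "my_table"), table, ignore_if_exists=False)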
class CatalogPartition(object):
"""
Represents a partition object in catalog.
"""
def __init__(self, j_catalog_partition):
self._j_catalog_partition = j_catalog_partition
@staticmethod
def create_instance(
properties: Dict[str, str],
comment: str = None
) -> "CatalogPartition":
"""
Creates an instance of CatalogPartition.
:param properties: Property of the partition
:param comment: Comment of the partition
"""
assert properties is not None
gateway = get_gateway()
return CatalogPartition(
gateway.jvm.org.apache.flink.table.catalog.CatalogPartitionImpl(
properties, comment))
@staticmethod
def _get(j_catalog_partition):
return CatalogPartition(j_catalog_partition)
def get_properties(self) -> Dict[str, str]:
"""
Get a map of properties associated with the partition.
:return: A map of properties with the partition.
"""
return dict(self._j_catalog_partition.getProperties())
def copy(self) -> 'CatalogPartition':
"""
Get a deep copy of the CatalogPartition instance.
:return: A copy of CatalogPartition instance.
"""
return CatalogPartition(self._j_catalog_partition.copy())
def get_description(self) -> Optional[str]:
"""
Get a brief description of the partition object.
:return: An optional short description of partition object.
"""
description = self._j_catalog_partition.getDescription()
if description.isPresent():
return description.get()
else:
return None
def get_detailed_description(self) -> Optional[str]:
"""
Get a detailed description of the partition object.
:return: An optional long description of the partition object.
"""
detailed_description = self._j_catalog_partition.getDetailedDescription()
if detailed_description.isPresent():
return detailed_description.get()
else:
return None
def get_comment(self) -> str:
"""
Get comment of the partition.
:return: Comment of the partition.
"""
return self._j_catalog_partition.getComment()
class CatalogFunction(object):
"""
Interface for a function in a catalog.
"""
def __init__(self, j_catalog_function):
self._j_catalog_function = j_catalog_function
@staticmethod
def create_instance(
class_name: str,
function_language: str = 'Python'
) -> "CatalogFunction":
"""
        Creates an instance of CatalogFunction.
:param class_name: full qualified path of the class name
:param function_language: language of the function, must be one of
'Python', 'Java' or 'Scala'. (default Python)
"""
assert class_name is not None
gateway = get_gateway()
FunctionLanguage = gateway.jvm.org.apache.flink.table.catalog.FunctionLanguage
if function_language.lower() == 'python':
function_language = FunctionLanguage.PYTHON
elif function_language.lower() == 'java':
function_language = FunctionLanguage.JAVA
elif function_language.lower() == 'scala':
function_language = FunctionLanguage.SCALA
else:
raise ValueError("function_language must be one of 'Python', 'Java' or 'Scala'")
return CatalogFunction(
gateway.jvm.org.apache.flink.table.catalog.CatalogFunctionImpl(
class_name, function_language))
@staticmethod
def _get(j_catalog_function):
return CatalogFunction(j_catalog_function)
def get_class_name(self) -> str:
"""
Get the full name of the class backing the function.
:return: The full name of the class.
"""
return self._j_catalog_function.getClassName()
def copy(self) -> 'CatalogFunction':
"""
Create a deep copy of the function.
:return: A deep copy of "this" instance.
"""
return CatalogFunction(self._j_catalog_function.copy())
def get_description(self) -> Optional[str]:
"""
Get a brief description of the function.
:return: An optional short description of function.
"""
description = self._j_catalog_function.getDescription()
if description.isPresent():
return description.get()
else:
return None
def get_detailed_description(self) -> Optional[str]:
"""
Get a detailed description of the function.
:return: An optional long description of the function.
"""
detailed_description = self._j_catalog_function.getDetailedDescription()
if detailed_description.isPresent():
return detailed_description.get()
else:
return None
def is_generic(self) -> bool:
"""
        Whether or not the function is a Flink UDF.
        :return: Whether the function is a Flink UDF.
.. versionadded:: 1.10.0
"""
return self._j_catalog_function.isGeneric()
def get_function_language(self):
"""
Get the language used for the function definition.
:return: the language type of the function definition
.. versionadded:: 1.10.0
"""
return self._j_catalog_function.getFunctionLanguage()
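# Hedged usage sketch (not part of the original module): registers a Python UDF class
# as a catalog function; the class path, object names and "catalog" parameter are
# illustrative assumptions.
def _create_catalog_function_example(catalog):
    function = CatalogFunction.create_instance(
        "my_package.my_udfs.MyUpper", function_language="Python")
    catalog.create_function(ObjectPath("my_db", "my_upper"), function, ignore_if_exists=False)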
class Procedure(object):
"""
Interface for a procedure in a catalog.
"""
def __init__(self, j_procedure):
self._j_procedure = j_procedure
@staticmethod
def _get(j_procedure):
return Procedure(j_procedure)
class ObjectPath(object):
"""
A database name and object (table/view/function) name combo in a catalog.
"""
def __init__(self, database_name=None, object_name=None, j_object_path=None):
if j_object_path is None:
gateway = get_gateway()
self._j_object_path = gateway.jvm.ObjectPath(database_name, object_name)
else:
self._j_object_path = j_object_path
def __str__(self):
return self._j_object_path.toString()
def __hash__(self):
return self._j_object_path.hashCode()
def __eq__(self, other):
return isinstance(other, self.__class__) and self._j_object_path.equals(
other._j_object_path)
def get_database_name(self) -> str:
return self._j_object_path.getDatabaseName()
def get_object_name(self) -> str:
return self._j_object_path.getObjectName()
def get_full_name(self) -> str:
return self._j_object_path.getFullName()
@staticmethod
def from_string(full_name: str) -> 'ObjectPath':
gateway = get_gateway()
return ObjectPath(j_object_path=gateway.jvm.ObjectPath.fromString(full_name))
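# Hedged usage sketch (not part of the original module): the two equivalent ways of
# building an ObjectPath; the database and table names are illustrative.
def _object_path_example():
    path = ObjectPath("my_db", "my_table")
    same_path = ObjectPath.from_string("my_db.my_table")
    assert path.get_full_name() == same_path.get_full_name() == "my_db.my_table"
    return path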
class CatalogPartitionSpec(object):
"""
Represents a partition spec object in catalog.
Partition columns and values are NOT of strict order, and they need to be re-arranged to the
correct order by comparing with a list of strictly ordered partition keys.
"""
def __init__(self, partition_spec):
if isinstance(partition_spec, dict):
gateway = get_gateway()
self._j_catalog_partition_spec = gateway.jvm.CatalogPartitionSpec(partition_spec)
else:
self._j_catalog_partition_spec = partition_spec
def __str__(self):
return self._j_catalog_partition_spec.toString()
def __hash__(self):
return self._j_catalog_partition_spec.hashCode()
def __eq__(self, other):
return isinstance(other, self.__class__) and self._j_catalog_partition_spec.equals(
other._j_catalog_partition_spec)
def get_partition_spec(self) -> Dict[str, str]:
"""
Get the partition spec as key-value map.
:return: A map of partition spec keys and values.
"""
return dict(self._j_catalog_partition_spec.getPartitionSpec())
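# Hedged usage sketch (not part of the original module): adds a partition to a
# partitioned table; "catalog", the table path and the partition column "dt" are
# illustrative assumptions.
def _create_partition_example(catalog):
    table_path = ObjectPath("my_db", "my_partitioned_table")
    spec = CatalogPartitionSpec({"dt": "2024-01-01"})
    partition = CatalogPartition.create_instance({}, comment="daily partition")
    catalog.create_partition(table_path, spec, partition, ignore_if_exists=True)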
class CatalogTableStatistics(object):
"""
Statistics for a non-partitioned table or a partition of a partitioned table.
"""
def __init__(self, row_count=None, field_count=None, total_size=None, raw_data_size=None,
properties=None, j_catalog_table_statistics=None):
gateway = get_gateway()
java_import(gateway.jvm, "org.apache.flink.table.catalog.stats.CatalogTableStatistics")
if j_catalog_table_statistics is None:
if properties is None:
self._j_catalog_table_statistics = gateway.jvm.CatalogTableStatistics(
row_count, field_count, total_size, raw_data_size)
else:
self._j_catalog_table_statistics = gateway.jvm.CatalogTableStatistics(
row_count, field_count, total_size, raw_data_size, properties)
else:
self._j_catalog_table_statistics = j_catalog_table_statistics
def get_row_count(self) -> int:
"""
The number of rows in the table or partition.
"""
return self._j_catalog_table_statistics.getRowCount()
def get_field_count(self) -> int:
"""
The number of files on disk.
"""
return self._j_catalog_table_statistics.getFileCount()
def get_total_size(self) -> int:
"""
The total size in bytes.
"""
return self._j_catalog_table_statistics.getTotalSize()
def get_raw_data_size(self) -> int:
"""
The raw data size (size when loaded in memory) in bytes.
"""
return self._j_catalog_table_statistics.getRawDataSize()
def get_properties(self) -> Dict[str, str]:
return dict(self._j_catalog_table_statistics.getProperties())
def copy(self) -> 'CatalogTableStatistics':
"""
Create a deep copy of "this" instance.
"""
return CatalogTableStatistics(
j_catalog_table_statistics=self._j_catalog_table_statistics.copy())
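# Hedged usage sketch (not part of the original module): reports fresh statistics for
# a table; the numbers, the table path and the "catalog" parameter are illustrative
# assumptions.
def _update_table_statistics_example(catalog):
    stats = CatalogTableStatistics(
        row_count=1000, field_count=10, total_size=1024 * 1024, raw_data_size=4 * 1024 * 1024)
    catalog.alter_table_statistics(
        ObjectPath("my_db", "my_table"), stats, ignore_if_not_exists=False)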
class CatalogColumnStatistics(object):
"""
Column statistics of a table or partition.
"""
def __init__(self, column_statistics_data=None, properties=None,
j_catalog_column_statistics=None):
if j_catalog_column_statistics is None:
gateway = get_gateway()
java_import(gateway.jvm, "org.apache.flink.table.catalog.stats.CatalogColumnStatistics")
if properties is None:
self._j_catalog_column_statistics = gateway.jvm.CatalogColumnStatistics(
column_statistics_data)
else:
self._j_catalog_column_statistics = gateway.jvm.CatalogColumnStatistics(
column_statistics_data, properties)
else:
self._j_catalog_column_statistics = j_catalog_column_statistics
def get_column_statistics_data(self):
return self._j_catalog_column_statistics.getColumnStatisticsData()
def get_properties(self) -> Dict[str, str]:
return dict(self._j_catalog_column_statistics.getProperties())
def copy(self) -> 'CatalogColumnStatistics':
return CatalogColumnStatistics(
j_catalog_column_statistics=self._j_catalog_column_statistics.copy())
class HiveCatalog(Catalog):
"""
A catalog implementation for Hive.
"""
def __init__(self, catalog_name: str, default_database: str = None, hive_conf_dir: str = None,
hadoop_conf_dir: str = None, hive_version: str = None):
assert catalog_name is not None
gateway = get_gateway()
j_hive_catalog = gateway.jvm.org.apache.flink.table.catalog.hive.HiveCatalog(
catalog_name, default_database, hive_conf_dir, hadoop_conf_dir, hive_version)
super(HiveCatalog, self).__init__(j_hive_catalog)
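# Hedged usage sketch (not part of the original module): registers a HiveCatalog with
# a TableEnvironment and makes it the current catalog. The catalog name, hive-conf
# directory and "t_env" parameter are illustrative assumptions; the Hive connector
# jars must be on the classpath.
def _register_hive_catalog_example(t_env):
    hive_catalog = HiveCatalog("myhive", default_database="default",
                               hive_conf_dir="/opt/hive/conf")
    t_env.register_catalog("myhive", hive_catalog)
    t_env.use_catalog("myhive")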
class JdbcCatalog(Catalog):
"""
A catalog implementation for Jdbc.
"""
def __init__(self, catalog_name: str, default_database: str, username: str, pwd: str,
base_url: str):
assert catalog_name is not None
assert default_database is not None
assert username is not None
assert pwd is not None
assert base_url is not None
from pyflink.java_gateway import get_gateway
gateway = get_gateway()
j_jdbc_catalog = gateway.jvm.org.apache.flink.connector.jdbc.catalog.JdbcCatalog(
catalog_name, default_database, username, pwd, base_url)
super(JdbcCatalog, self).__init__(j_jdbc_catalog)
| 52,458 | 41.580357 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/sources.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.table.types import _to_java_data_type
__all__ = ['TableSource', 'CsvTableSource']
class TableSource(object):
"""
Defines a table from an external system or location.
"""
def __init__(self, j_table_source):
self._j_table_source = j_table_source
class CsvTableSource(TableSource):
"""
A :class:`TableSource` for simple CSV files with a
(logically) unlimited number of fields.
Example:
::
>>> CsvTableSource("/csv/file/path", ["a", "b"], [DataTypes.INT(), DataTypes.STRING()])
:param source_path: The path to the CSV file.
:type source_path: str
:param field_names: The names of the table fields.
:type field_names: collections.Iterable[str]
:param field_types: The types of the table fields.
:type field_types: collections.Iterable[str]
:param field_delim: The field delimiter, "," by default.
:type field_delim: str, optional
:param line_delim: The row delimiter, "\\n" by default.
:type line_delim: str, optional
:param quote_character: An optional quote character for String values, null by default.
:type quote_character: str, optional
:param ignore_first_line: Flag to ignore the first line, false by default.
:type ignore_first_line: bool, optional
:param ignore_comments: An optional prefix to indicate comments, null by default.
:type ignore_comments: str, optional
    :param lenient: Flag to skip records with parse errors instead of failing, false by default.
:type lenient: bool, optional
:param empty_column_as_null: Treat empty column as null, false by default.
:type empty_column_as_null: bool, optional
"""
def __init__(
self,
source_path,
field_names,
field_types,
field_delim=None,
line_delim=None,
quote_character=None,
ignore_first_line=None,
ignore_comments=None,
lenient=None,
empty_column_as_null=None,
):
gateway = get_gateway()
builder = gateway.jvm.CsvTableSource.builder()
builder.path(source_path)
for (field_name, field_type) in zip(field_names, field_types):
builder.field(field_name, _to_java_data_type(field_type))
if field_delim is not None:
builder.fieldDelimiter(field_delim)
if line_delim is not None:
builder.lineDelimiter(line_delim)
if quote_character is not None:
# Java API has a Character type for this field. At time of writing,
# Py4J will convert the Python str to Java Character by taking only
# the first character. This results in either:
# - Silently truncating a Python str with more than one character
# with no further type error from either Py4J or Java
# CsvTableSource
# - java.lang.StringIndexOutOfBoundsException from Py4J for an
# empty Python str. That error can be made more friendly here.
if len(quote_character) != 1:
raise ValueError(
"Expected a single CSV quote character but got '{}'".format(quote_character)
)
builder.quoteCharacter(quote_character)
if ignore_first_line:
builder.ignoreFirstLine()
if ignore_comments is not None:
builder.commentPrefix(ignore_comments)
if lenient:
builder.ignoreParseErrors()
if empty_column_as_null:
builder.emptyColumnAsNull()
super(CsvTableSource, self).__init__(builder.build())
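# Hedged usage sketch (not part of the original module): constructs a CsvTableSource
# for a ";"-delimited file that has a header row; the path and field names are
# illustrative assumptions.
def _csv_table_source_example():
    from pyflink.table.types import DataTypes
    return CsvTableSource(
        "/tmp/users.csv",
        ["id", "name"],
        [DataTypes.INT(), DataTypes.STRING()],
        field_delim=";",
        ignore_first_line=True)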
| 4,611 | 37.433333 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/table/schema.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Union, List
from pyflink.java_gateway import get_gateway
from pyflink.table import Expression
from pyflink.table.expression import _get_java_expression
from pyflink.table.types import DataType, _to_java_data_type
from pyflink.util.java_utils import to_jarray
__all__ = ['Schema']
class Schema(object):
"""
Schema of a table or view.
A schema represents the schema part of a {@code CREATE TABLE (schema) WITH (options)} DDL
statement in SQL. It defines columns of different kind, constraints, time attributes, and
watermark strategies. It is possible to reference objects (such as functions or types) across
different catalogs.
This class is used in the API and catalogs to define an unresolved schema that will be
translated to ResolvedSchema. Some methods of this class perform basic validation, however, the
main validation happens during the resolution. Thus, an unresolved schema can be incomplete and
might be enriched or merged with a different schema at a later stage.
Since an instance of this class is unresolved, it should not be directly persisted. The str()
shows only a summary of the contained objects.
"""
def __init__(self, j_schema):
self._j_schema = j_schema
@staticmethod
def new_builder() -> 'Schema.Builder':
gateway = get_gateway()
j_builder = gateway.jvm.Schema.newBuilder()
return Schema.Builder(j_builder)
def __str__(self):
return self._j_schema.toString()
def __eq__(self, other):
return self.__class__ == other.__class__ and self._j_schema.equals(other._j_schema)
def __hash__(self):
return self._j_schema.hashCode()
class Builder(object):
"""
A builder for constructing an immutable but still unresolved Schema.
"""
def __init__(self, j_builder):
self._j_builder = j_builder
def from_schema(self, unresolved_schema: 'Schema') -> 'Schema.Builder':
"""
Adopts all members from the given unresolved schema.
"""
self._j_builder.fromSchema(unresolved_schema._j_schema)
return self
def from_row_data_type(self, data_type: DataType) -> 'Schema.Builder':
"""
Adopts all fields of the given row as physical columns of the schema.
"""
self._j_builder.fromRowDataType(_to_java_data_type(data_type))
return self
def from_fields(self,
field_names: List[str],
field_data_types: List[DataType]) -> 'Schema.Builder':
"""
Adopts the given field names and field data types as physical columns of the schema.
"""
gateway = get_gateway()
j_field_names = to_jarray(gateway.jvm.String, field_names)
j_field_data_types = to_jarray(
gateway.jvm.AbstractDataType,
[_to_java_data_type(field_data_type) for field_data_type in field_data_types])
self._j_builder.fromFields(j_field_names, j_field_data_types)
return self
def column(self,
column_name: str,
data_type: Union[str, DataType]) -> 'Schema.Builder':
"""
Declares a physical column that is appended to this schema.
Physical columns are regular columns known from databases. They define the names, the
types, and the order of fields in the physical data. Thus, physical columns represent
the payload that is read from and written to an external system. Connectors and formats
use these columns (in the defined order) to configure themselves. Other kinds of columns
can be declared between physical columns but will not influence the final physical
schema.
:param column_name: Column name
:param data_type: Data type of the column
"""
if isinstance(data_type, str):
self._j_builder.column(column_name, data_type)
else:
self._j_builder.column(column_name, _to_java_data_type(data_type))
return self
def column_by_expression(self,
column_name: str,
expr: Union[str, Expression]) -> 'Schema.Builder':
"""
Declares a computed column that is appended to this schema.
Computed columns are virtual columns that are generated by evaluating an expression
that can reference other columns declared in the same table. Both physical columns and
metadata columns can be accessed. The column itself is not physically stored within the
table. The column’s data type is derived automatically from the given expression and
does not have to be declared manually.
Computed columns are commonly used for defining time attributes. For example, a
computed column can be used if the original field is not of type TIMESTAMP(3) or is
nested inside a JSON string.
Example:
::
>>> Schema.new_builder() \\
... .column_by_expression("ts", "orig_ts - INTERVAL '60' MINUTE") \\
... .column_by_metadata("orig_ts", DataTypes.TIMESTAMP(3), "timestamp")
:param column_name: Column name
:param expr: Computation of the column
"""
self._j_builder.columnByExpression(column_name, _get_java_expression(expr))
return self
def column_by_metadata(self,
column_name: str,
data_type: Union[DataType, str],
metadata_key: str = None,
is_virtual: bool = False) -> 'Schema.Builder':
"""
Declares a metadata column that is appended to this schema.
Metadata columns allow access to connector- and/or format-specific fields for every row
of a table. For example, a metadata column can be used to read and write the timestamp
from and to Kafka records for time-based operations. The connector and format
documentation lists the available metadata fields for every component.
Every metadata field is identified by a string-based key and has a documented data
type. The metadata key can be omitted if the column name should be used as the
identifying metadata key. For convenience, the runtime will perform an explicit cast if
the data type of the column differs from the data type of the metadata field. Of course,
this requires that the two data types are compatible.
By default, a metadata column can be used for both reading and writing. However, in
many cases an external system provides more read-only metadata fields than writable
fields. Therefore, it is possible to exclude metadata columns from persisting by setting
the ``is_virtual`` flag to ``True``.
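Example (a sketch only; the metadata keys "timestamp" and "partition" are assumptions
that hold for connectors such as Kafka which document these keys):
::
>>> Schema.new_builder() \\
... .column_by_metadata("event_time", DataTypes.TIMESTAMP(3), "timestamp") \\
... .column_by_metadata("partition", DataTypes.INT(), None, True)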
:param column_name: Column name
:param data_type: Data type of the column
:param metadata_key: Identifying metadata key, if null the column name will be used as
metadata key
:param is_virtual: Whether the column should be persisted or not
"""
if isinstance(data_type, DataType):
self._j_builder.columnByMetadata(
column_name,
_to_java_data_type(data_type),
metadata_key, is_virtual)
else:
self._j_builder.columnByMetadata(
column_name,
data_type,
metadata_key,
is_virtual)
return self
def watermark(self,
column_name: str,
watermark_expr: Union[str, Expression]) -> 'Schema.Builder':
"""
Declares that the given column should serve as an event-time (i.e. rowtime) attribute
and specifies a corresponding watermark strategy as an expression.
The column must be of type ``TIMESTAMP(3)`` or ``TIMESTAMP_LTZ(3)`` and be a
top-level column in the schema. It may be a computed column.
The watermark generation expression is evaluated by the framework for every record
during runtime. The framework will periodically emit the largest generated watermark. If
the current watermark is still identical to the previous one, or is null, or the value
of the returned watermark is smaller than that of the last emitted one, then no new
watermark will be emitted. A watermark is emitted in an interval defined by the
configuration.
Any scalar expression can be used for declaring a watermark strategy for
in-memory/temporary tables. However, currently, only SQL expressions can be persisted in
a catalog. The expression's return data type must be ``TIMESTAMP(3)``. User-defined
functions (also defined in different catalogs) are supported.
Example:
::
>>> Schema.new_builder().watermark("ts", "ts - INTERVAL '5' SECOND")
:param column_name: The column name used as a rowtime attribute
:param watermark_expr: The expression used for watermark generation
"""
self._j_builder.watermark(column_name, _get_java_expression(watermark_expr))
return self
def primary_key(self, *column_names: str) -> 'Schema.Builder':
"""
Declares a primary key constraint for a set of given columns. A primary key uniquely
identifies a row in a table. None of the columns in a primary key can be nullable. The
primary key is informational only; it will not be enforced, but it can be used for
optimizations. It is the data owner's responsibility to ensure uniqueness of the data.
The primary key will be assigned a generated name in the format ``PK_col1_col2``.
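Example (the column name is illustrative):
::
>>> Schema.new_builder() \\
... .column("id", DataTypes.INT().not_null()) \\
... .primary_key("id")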
:param column_names: Columns that form a unique primary key
"""
gateway = get_gateway()
self._j_builder.primaryKey(to_jarray(gateway.jvm.java.lang.String, column_names))
return self
def primary_key_named(self,
constraint_name: str,
*column_names: str) -> 'Schema.Builder':
"""
Declares a primary key constraint for a set of given columns. A primary key uniquely
identifies a row in a table. None of the columns in a primary key can be nullable. The
primary key is informational only; it will not be enforced, but it can be used for
optimizations. It is the data owner's responsibility to ensure uniqueness of the data.
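Example (constraint and column names are illustrative):
::
>>> Schema.new_builder() \\
... .column("id", DataTypes.INT().not_null()) \\
... .primary_key_named("pk_id", "id")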
:param constraint_name: Name for the primary key, can be used to reference the
constraint
:param column_names: Columns that form a unique primary key
"""
gateway = get_gateway()
self._j_builder.primaryKeyNamed(
constraint_name,
to_jarray(gateway.jvm.java.lang.String, column_names))
return self
def build(self) -> 'Schema':
"""
Returns an instance of an unresolved Schema.
"""
return Schema(self._j_builder.build())
| 12,689 | 46.52809 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/changelog_mode.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
__all__ = ['ChangelogMode']
class ChangelogMode(object):
"""
The set of changes contained in a changelog.
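Example (illustrative usage; the resulting mode is typically passed to APIs that accept
a changelog mode, e.g. StreamTableEnvironment.to_changelog_stream):
::
>>> mode = ChangelogMode.upsert()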
"""
def __init__(self, j_changelog_mode):
self._j_changelog_mode = j_changelog_mode
@staticmethod
def insert_only():
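# Shortcut for a changelog that contains only INSERT rows.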
gateway = get_gateway()
return ChangelogMode(
gateway.jvm.org.apache.flink.table.connector.ChangelogMode.insertOnly())
@staticmethod
def upsert():
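# Changelog that can contain INSERT, UPDATE_AFTER and DELETE rows, but no UPDATE_BEFORE.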
gateway = get_gateway()
return ChangelogMode(
gateway.jvm.org.apache.flink.table.connector.ChangelogMode.upsert())
@staticmethod
def all():
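# Shortcut for a changelog that can contain all kinds of changes (INSERT, UPDATE_BEFORE, UPDATE_AFTER, DELETE).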
gateway = get_gateway()
return ChangelogMode(
gateway.jvm.org.apache.flink.table.connector.ChangelogMode.all())
| 1,764 | 35.770833 | 84 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_correlate.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table import expressions as expr
from pyflink.testing.test_case_utils import PyFlinkUTTestCase
class CorrelateTests(PyFlinkUTTestCase):
def test_join_lateral(self):
t_env = self.t_env
t_env.create_java_temporary_system_function(
"split",
"org.apache.flink.table.utils.TestingFunctions$TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.join_lateral(expr.call('split', source.words).alias('word'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('INNER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
def test_join_lateral_with_join_predicate(self):
t_env = self.t_env
t_env.create_java_temporary_system_function(
"split",
"org.apache.flink.table.utils.TestingFunctions$TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.join_lateral(expr.call('split', source.words).alias('word'),
expr.col('id') == expr.col('word'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('INNER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('equals(id, word)',
query_operation.getCondition().toString())
def test_left_outer_join_lateral(self):
t_env = self.t_env
t_env.create_java_temporary_system_function(
"split",
"org.apache.flink.table.utils.TestingFunctions$TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
result = source.left_outer_join_lateral(expr.call('split', source.words).alias('word'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('LEFT_OUTER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
def test_left_outer_join_lateral_with_join_predicate(self):
t_env = self.t_env
t_env.create_java_temporary_system_function(
"split",
"org.apache.flink.table.utils.TestingFunctions$TableFunc1")
source = t_env.from_elements([("1", "1#3#5#7"), ("2", "2#4#6#8")], ["id", "words"])
# currently, only "true" is supported as the join predicate
result = source.left_outer_join_lateral(expr.call('split', source.words).alias('word'),
expr.lit(True))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('LEFT_OUTER', query_operation.getJoinType().toString())
self.assertTrue(query_operation.isCorrelated())
self.assertEqual('true', query_operation.getCondition().toString())
| 4,063 | 47.963855 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_table_config_completeness.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
from pyflink.table import TableConfig
class TableConfigCompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`TableConfig` is consistent with
Java `org.apache.flink.table.api.TableConfig`.
"""
@classmethod
def python_class(cls):
return TableConfig
@classmethod
def java_class(cls):
return "org.apache.flink.table.api.TableConfig"
@classmethod
def excluded_methods(cls):
# internal interfaces, no need to expose to users.
return {'getPlannerConfig', 'setPlannerConfig', 'addJobParameter',
'setRootConfiguration', 'getRootConfiguration', 'getOptional'}
@classmethod
def java_method_name(cls, python_method_name):
# Most time zone related libraries in Python use 'timezone' instead of 'time_zone'.
return {'get_local_timezone': 'get_local_time_zone',
'set_local_timezone': 'set_local_time_zone'}\
.get(python_method_name, python_method_name)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,347 | 38.133333 | 91 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_sort.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase
class BatchTableSortTests(PyFlinkBatchTableTestCase):
def test_order_by_offset_fetch(self):
t = self.t_env.from_elements([(1, "Hello")], ["a", "b"])
result = t.order_by(t.a.desc).offset(2).fetch(2)
query_operation = result._j_table.getQueryOperation()
self.assertEqual(2, query_operation.getOffset())
self.assertEqual(2, query_operation.getFetch())
self.assertEqual('[desc(a)]',
query_operation.getOrder().toString())
def test_limit(self):
t = self.t_env.from_elements([(1, "Hello")], ["a", "b"])
result = t.limit(1)
query_operation = result._j_table.getQueryOperation()
self.assertEqual(0, query_operation.getOffset())
self.assertEqual(1, query_operation.getFetch())
def test_limit_with_offset(self):
t = self.t_env.from_elements([(1, "Hello")], ["a", "b"])
result = t.limit(1, 2)
query_operation = result._j_table.getQueryOperation()
self.assertEqual(2, query_operation.getOffset())
self.assertEqual(1, query_operation.getFetch())
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,398 | 38.983333 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_join.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class StreamTableJoinTests(PyFlinkStreamTableTestCase):
def test_join_without_where(self):
t_env = self.t_env
t1 = t_env.from_elements([(1, "Hi", "Hello")], ['a', 'b', 'c'])
t2 = t_env.from_elements([(2, "Flink")], ['d', 'e'])
result = t1.join(t2, t1.a == t2.d)
query_operation = result._j_table.getQueryOperation()
self.assertEqual('INNER', query_operation.getJoinType().toString())
self.assertEqual('equals(a, d)',
query_operation.getCondition().toString())
self.assertFalse(query_operation.isCorrelated())
def test_join_with_where(self):
t_env = self.t_env
t1 = t_env.from_elements([(1, "Hi", "Hello")], ['a', 'b', 'c'])
t2 = t_env.from_elements([(2, "Flink")], ['d', 'e'])
result = t1.join(t2).where(t1.a == t2.d)
query_operation = result._j_table.getQueryOperation().getChildren().get(0)
self.assertEqual('INNER', query_operation.getJoinType().toString())
self.assertEqual('true', query_operation.getCondition().toString())
self.assertFalse(query_operation.isCorrelated())
def test_left_outer_join_without_where(self):
t_env = self.t_env
t1 = t_env.from_elements([(1, "Hi", "Hello")], ['a', 'b', 'c'])
t2 = t_env.from_elements([(2, "Flink")], ['d', 'e'])
result = t1.left_outer_join(t2, t1.a == t2.d)
query_operation = result._j_table.getQueryOperation()
self.assertEqual('LEFT_OUTER', query_operation.getJoinType().toString())
self.assertEqual('equals(a, d)',
query_operation.getCondition().toString())
self.assertFalse(query_operation.isCorrelated())
def test_left_outer_join_with_where(self):
t_env = self.t_env
t1 = t_env.from_elements([(1, "Hi", "Hello")], ['a', 'b', 'c'])
t2 = t_env.from_elements([(2, "Flink")], ['d', 'e'])
result = t1.left_outer_join(t2).where(t1.a == t2.d)
query_operation = result._j_table.getQueryOperation().getChildren().get(0)
self.assertEqual('LEFT_OUTER', query_operation.getJoinType().toString())
self.assertEqual('true', query_operation.getCondition().toString())
self.assertFalse(query_operation.isCorrelated())
def test_right_outer_join(self):
t_env = self.t_env
t1 = t_env.from_elements([(1, "Hi", "Hello")], ['a', 'b', 'c'])
t2 = t_env.from_elements([(2, "Flink")], ['d', 'e'])
result = t1.right_outer_join(t2, t1.a == t2.d)
query_operation = result._j_table.getQueryOperation()
self.assertEqual('RIGHT_OUTER', query_operation.getJoinType().toString())
self.assertEqual('equals(a, d)',
query_operation.getCondition().toString())
self.assertFalse(query_operation.isCorrelated())
def test_full_outer_join(self):
t_env = self.t_env
t1 = t_env.from_elements([(1, "Hi", "Hello")], ['a', 'b', 'c'])
t2 = t_env.from_elements([(2, "Flink")], ['d', 'e'])
result = t1.full_outer_join(t2, t1.a == t2.d)
query_operation = result._j_table.getQueryOperation()
self.assertEqual('FULL_OUTER', query_operation.getJoinType().toString())
self.assertEqual('equals(a, d)',
query_operation.getCondition().toString())
self.assertFalse(query_operation.isCorrelated())
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 4,704 | 44.240385 | 82 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_schema.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table import DataTypes
from pyflink.table.expressions import call_sql
from pyflink.table.schema import Schema
from pyflink.testing.test_case_utils import PyFlinkTestCase
class SchemaTest(PyFlinkTestCase):
def test_schema_basic(self):
old_schema = Schema.new_builder() \
.from_row_data_type(DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())])) \
.from_fields(["d", "e"], [DataTypes.STRING(), DataTypes.BOOLEAN()]) \
.build()
self.schema = Schema.new_builder() \
.from_schema(old_schema) \
.primary_key_named("primary_constraint", "id") \
.column("id", DataTypes.INT().not_null()) \
.column("counter", DataTypes.INT().not_null()) \
.column("payload", "ROW<name STRING, age INT, flag BOOLEAN>") \
.column_by_metadata("topic", DataTypes.STRING(), None, True) \
.column_by_expression("ts", call_sql("orig_ts - INTERVAL '60' MINUTE")) \
.column_by_metadata("orig_ts", DataTypes.TIMESTAMP(3), "timestamp") \
.watermark("ts", "ts - INTERVAL '5' SECOND") \
.column_by_expression("proctime", "PROCTIME()") \
.build()
self.assertEqual("""(
`a` TINYINT,
`b` SMALLINT,
`c` INT,
`d` STRING,
`e` BOOLEAN,
`id` INT NOT NULL,
`counter` INT NOT NULL,
`payload` [ROW<name STRING, age INT, flag BOOLEAN>],
`topic` METADATA VIRTUAL,
`ts` AS [orig_ts - INTERVAL '60' MINUTE],
`orig_ts` METADATA FROM 'timestamp',
`proctime` AS [PROCTIME()],
WATERMARK FOR `ts` AS [ts - INTERVAL '5' SECOND],
CONSTRAINT `primary_constraint` PRIMARY KEY (`id`) NOT ENFORCED
)""", str(self.schema))
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 3,059 | 40.917808 | 85 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_types.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import array
import ctypes
import datetime
import pickle
import sys
import tempfile
import unittest
from pyflink.pyflink_gateway_server import on_windows
from pyflink.serializers import BatchedSerializer, PickleSerializer
from pyflink.java_gateway import get_gateway
from pyflink.table.types import (_infer_schema_from_data, _infer_type,
_array_signed_int_typecode_ctype_mappings,
_array_unsigned_int_typecode_ctype_mappings,
_array_type_mappings, _merge_type,
_create_type_verifier, UserDefinedType, DataTypes, Row, RowField,
RowType, ArrayType, BigIntType, VarCharType, MapType, DataType,
_from_java_data_type, ZonedTimestampType,
LocalZonedTimestampType, _to_java_data_type)
from pyflink.testing.test_case_utils import PyFlinkTestCase
class ExamplePointUDT(UserDefinedType):
"""
User-defined type (UDT) for ExamplePoint.
"""
@classmethod
def sql_type(cls):
return DataTypes.ARRAY(DataTypes.DOUBLE(False))
@classmethod
def module(cls):
return 'pyflink.table.tests.test_types'
@classmethod
def java_udt(cls):
return 'org.apache.flink.table.types.python.ExamplePointUserDefinedType'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return ExamplePoint(datum[0], datum[1])
class ExamplePoint:
"""
An example class to demonstrate UDT in Java and Python.
"""
__UDT__ = ExamplePointUDT()
def __init__(self, x, y):
self.x = x
self.y = y
def __repr__(self):
return "ExamplePoint(%s,%s)" % (self.x, self.y)
def __str__(self):
return "(%s,%s)" % (self.x, self.y)
def __eq__(self, other):
return isinstance(other, self.__class__) and other.x == self.x and other.y == self.y
class PythonOnlyUDT(UserDefinedType):
"""
User-defined type (UDT) for PythonOnlyPoint.
"""
@classmethod
def sql_type(cls):
return DataTypes.ARRAY(DataTypes.DOUBLE(False))
@classmethod
def module(cls):
return '__main__'
def serialize(self, obj):
return [obj.x, obj.y]
def deserialize(self, datum):
return PythonOnlyPoint(datum[0], datum[1])
class PythonOnlyPoint(ExamplePoint):
"""
An example class to demonstrate UDT in Python only.
"""
__UDT__ = PythonOnlyUDT()
class UTCOffsetTimezone(datetime.tzinfo):
"""
Specifies a timezone by its UTC offset.
"""
def __init__(self, offset=0):
self.OFFSET = datetime.timedelta(hours=offset)
def utcoffset(self, dt):
return self.OFFSET
def dst(self, dt):
return self.OFFSET
class TypesTests(PyFlinkTestCase):
def test_infer_schema(self):
from decimal import Decimal
class A(object):
def __init__(self):
self.a = 1
from collections import namedtuple
Point = namedtuple('Point', 'x y')
data = [
True,
1,
"a",
u"a",
datetime.date(1970, 1, 1),
datetime.time(0, 0, 0),
datetime.datetime(1970, 1, 1, 0, 0),
1.0,
array.array("d", [1]),
[1],
(1,),
Point(1.0, 5.0),
{"a": 1},
bytearray(1),
Decimal(1),
Row(a=1),
Row("a")(1),
A(),
]
expected = [
'BooleanType(true)',
'BigIntType(true)',
'VarCharType(2147483647, true)',
'VarCharType(2147483647, true)',
'DateType(true)',
'TimeType(0, true)',
'LocalZonedTimestampType(6, true)',
'DoubleType(true)',
"ArrayType(DoubleType(false), true)",
"ArrayType(BigIntType(true), true)",
'RowType(RowField(_1, BigIntType(true), ...))',
'RowType(RowField(x, DoubleType(true), ...),RowField(y, DoubleType(true), ...))',
'MapType(VarCharType(2147483647, false), BigIntType(true), true)',
'VarBinaryType(2147483647, true)',
'DecimalType(38, 18, true)',
'RowType(RowField(a, BigIntType(true), ...))',
'RowType(RowField(a, BigIntType(true), ...))',
'RowType(RowField(a, BigIntType(true), ...))',
]
schema = _infer_schema_from_data([data])
self.assertEqual(expected, [repr(f.data_type) for f in schema.fields])
def test_infer_schema_nulltype(self):
elements = [Row(c1=[], c2={}, c3=None),
Row(c1=[Row(a=1, b='s')], c2={"key": Row(c=1.0, d="2")}, c3="")]
schema = _infer_schema_from_data(elements)
self.assertTrue(isinstance(schema, RowType))
self.assertEqual(3, len(schema.fields))
# first column is array
self.assertTrue(isinstance(schema.fields[0].data_type, ArrayType))
# element type of first column is struct
self.assertTrue(isinstance(schema.fields[0].data_type.element_type, RowType))
self.assertTrue(isinstance(schema.fields[0].data_type.element_type.fields[0].data_type,
BigIntType))
self.assertTrue(isinstance(schema.fields[0].data_type.element_type.fields[1].data_type,
VarCharType))
# second column is map
self.assertTrue(isinstance(schema.fields[1].data_type, MapType))
self.assertTrue(isinstance(schema.fields[1].data_type.key_type, VarCharType))
self.assertTrue(isinstance(schema.fields[1].data_type.value_type, RowType))
# third column is varchar
self.assertTrue(isinstance(schema.fields[2].data_type, VarCharType))
def test_infer_schema_not_enough_names(self):
schema = _infer_schema_from_data([["a", "b"]], ["col1"])
self.assertTrue(schema.names, ['col1', '_2'])
def test_infer_schema_fails(self):
with self.assertRaises(TypeError):
_infer_schema_from_data([[1, 1], ["x", 1]], names=["a", "b"])
def test_infer_nested_schema(self):
NestedRow = Row("f1", "f2")
data1 = [NestedRow([1, 2], {"row1": 1.0}), NestedRow([2, 3], {"row2": 2.0})]
schema1 = _infer_schema_from_data(data1)
expected1 = [
'ArrayType(BigIntType(true), true)',
'MapType(VarCharType(2147483647, false), DoubleType(true), true)'
]
self.assertEqual(expected1, [repr(f.data_type) for f in schema1.fields])
data2 = [NestedRow([[1, 2], [2, 3]], [1, 2]), NestedRow([[2, 3], [3, 4]], [2, 3])]
schema2 = _infer_schema_from_data(data2)
expected2 = [
'ArrayType(ArrayType(BigIntType(true), true), true)',
'ArrayType(BigIntType(true), true)'
]
self.assertEqual(expected2, [repr(f.data_type) for f in schema2.fields])
def test_convert_row_to_dict(self):
row = Row(l=[Row(a=1, b='s')], d={"key": Row(c=1.0, d="2")})
self.assertEqual(1, row.as_dict()['l'][0].a)
self.assertEqual(1.0, row.as_dict()['d']['key'].c)
def test_udt(self):
p = ExamplePoint(1.0, 2.0)
self.assertEqual(_infer_type(p), ExamplePointUDT())
_create_type_verifier(ExamplePointUDT())(ExamplePoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _create_type_verifier(ExamplePointUDT())([1.0, 2.0]))
p = PythonOnlyPoint(1.0, 2.0)
self.assertEqual(_infer_type(p), PythonOnlyUDT())
_create_type_verifier(PythonOnlyUDT())(PythonOnlyPoint(1.0, 2.0))
self.assertRaises(ValueError, lambda: _create_type_verifier(PythonOnlyUDT())([1.0, 2.0]))
def test_nested_udt_in_df(self):
expected_schema = DataTypes.ROW() \
.add("_1", DataTypes.BIGINT()).add("_2", DataTypes.ARRAY(PythonOnlyUDT()))
data = (1, [PythonOnlyPoint(float(1), float(2))])
self.assertEqual(expected_schema, _infer_type(data))
expected_schema = DataTypes.ROW().add("_1", DataTypes.BIGINT()).add(
"_2", DataTypes.MAP(DataTypes.BIGINT(False), PythonOnlyUDT()))
p = (1, {1: PythonOnlyPoint(1, float(2))})
self.assertEqual(expected_schema, _infer_type(p))
def test_struct_type(self):
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)),
DataTypes.FIELD("f2", DataTypes.STRING(nullable=True), None)])
self.assertEqual(row1.field_names(), row2.names)
self.assertEqual(row1, row2)
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True))])
self.assertNotEqual(row1.field_names(), row2.names)
self.assertNotEqual(row1, row2)
row1 = (DataTypes.ROW().add(DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)))
.add("f2", DataTypes.STRING(nullable=True)))
row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)),
DataTypes.FIELD("f2", DataTypes.STRING(nullable=True))])
self.assertEqual(row1.field_names(), row2.names)
self.assertEqual(row1, row2)
row1 = (DataTypes.ROW().add(DataTypes.FIELD("f1", DataTypes.STRING(nullable=True)))
.add("f2", DataTypes.STRING(nullable=True)))
row2 = DataTypes.ROW([DataTypes.FIELD("f1", DataTypes.STRING(nullable=True))])
self.assertNotEqual(row1.field_names(), row2.names)
self.assertNotEqual(row1, row2)
# Catch exception raised during improper construction
self.assertRaises(ValueError, lambda: DataTypes.ROW().add("name"))
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
for field in row1:
self.assertIsInstance(field, RowField)
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
self.assertEqual(len(row1), 2)
row1 = DataTypes.ROW().add("f1", DataTypes.STRING(nullable=True)) \
.add("f2", DataTypes.STRING(nullable=True))
self.assertIs(row1["f1"], row1.fields[0])
self.assertIs(row1[0], row1.fields[0])
self.assertEqual(row1[0:1], DataTypes.ROW(row1.fields[0:1]))
self.assertRaises(KeyError, lambda: row1["f9"])
self.assertRaises(IndexError, lambda: row1[9])
self.assertRaises(TypeError, lambda: row1[9.9])
def test_infer_bigint_type(self):
longrow = [Row(f1='a', f2=100000000000000)]
schema = _infer_schema_from_data(longrow)
self.assertEqual(DataTypes.BIGINT(), schema.fields[1].data_type)
self.assertEqual(DataTypes.BIGINT(), _infer_type(1))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 10))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 20))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 31 - 1))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 31))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 61))
self.assertEqual(DataTypes.BIGINT(), _infer_type(2 ** 71))
def test_merge_type(self):
self.assertEqual(_merge_type(DataTypes.BIGINT(), DataTypes.NULL()), DataTypes.BIGINT())
self.assertEqual(_merge_type(DataTypes.NULL(), DataTypes.BIGINT()), DataTypes.BIGINT())
self.assertEqual(_merge_type(DataTypes.BIGINT(), DataTypes.BIGINT()), DataTypes.BIGINT())
self.assertEqual(_merge_type(
DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.ARRAY(DataTypes.BIGINT())
), DataTypes.ARRAY(DataTypes.BIGINT()))
with self.assertRaises(TypeError):
_merge_type(DataTypes.ARRAY(DataTypes.BIGINT()), DataTypes.ARRAY(DataTypes.DOUBLE()))
self.assertEqual(_merge_type(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())
), DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.DOUBLE(), DataTypes.BIGINT()))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE()))
self.assertEqual(_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
DataTypes.FIELD('f2', DataTypes.STRING())])
), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
DataTypes.FIELD('f2', DataTypes.STRING())]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.BIGINT()),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.DOUBLE()),
DataTypes.FIELD('f2', DataTypes.STRING())]))
self.assertEqual(_merge_type(
DataTypes.ROW([DataTypes.FIELD(
'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))]),
DataTypes.ROW([DataTypes.FIELD(
'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))])
), DataTypes.ROW([DataTypes.FIELD(
'f1', DataTypes.ROW([DataTypes.FIELD('f2', DataTypes.BIGINT())]))]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ROW(
[DataTypes.FIELD('f2', DataTypes.BIGINT())]))]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ROW(
[DataTypes.FIELD('f2', DataTypes.STRING())]))]))
self.assertEqual(_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())])
), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.ARRAY(DataTypes.DOUBLE())),
DataTypes.FIELD('f2', DataTypes.STRING())]))
self.assertEqual(_merge_type(
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())])
), DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD('f2', DataTypes.STRING())]),
DataTypes.ROW([
DataTypes.FIELD('f1', DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE())),
DataTypes.FIELD('f2', DataTypes.STRING())]))
self.assertEqual(_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))])
), DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]))
with self.assertRaises(TypeError):
_merge_type(
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())))]),
DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.ARRAY(
DataTypes.MAP(DataTypes.DOUBLE(), DataTypes.BIGINT())))])
)
def test_array_types(self):
# This test needs to make sure that the Scala type selected is at least
# as large as Python's types. This is necessary because Python's array
# types depend on the C implementation on the machine. Therefore there
# is no machine-independent correspondence between Python's array types
# and Scala types.
# See: https://docs.python.org/2/library/array.html
def assert_collect_success(typecode, value, element_type):
self.assertEqual(element_type,
str(_infer_type(array.array(typecode, [value])).element_type))
# supported string types
#
# String types in python's array are "u" for Py_UNICODE and "c" for char.
# "u" will be removed in python 4, and "c" is not supported in python 3.
supported_string_types = []
if sys.version_info[0] < 4:
supported_string_types += ['u']
# test unicode
assert_collect_success('u', u'a', 'CHAR')
# supported float and double
#
# Test max, min, and precision for float and double, assuming IEEE 754
# floating-point format.
supported_fractional_types = ['f', 'd']
assert_collect_success('f', ctypes.c_float(1e+38).value, 'FLOAT')
assert_collect_success('f', ctypes.c_float(1e-38).value, 'FLOAT')
assert_collect_success('f', ctypes.c_float(1.123456).value, 'FLOAT')
assert_collect_success('d', sys.float_info.max, 'DOUBLE')
assert_collect_success('d', sys.float_info.min, 'DOUBLE')
assert_collect_success('d', sys.float_info.epsilon, 'DOUBLE')
def get_int_data_type(size):
if size <= 8:
return "TINYINT"
if size <= 16:
return "SMALLINT"
if size <= 32:
return "INT"
if size <= 64:
return "BIGINT"
# supported signed int types
#
# The size of C types changes with the implementation, so we need to make sure
# that there is no overflow error on the platform running this test.
supported_signed_int_types = list(
set(_array_signed_int_typecode_ctype_mappings.keys()).intersection(
set(_array_type_mappings.keys())))
for t in supported_signed_int_types:
ctype = _array_signed_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assert_collect_success(t, max_val - 1, get_int_data_type(ctypes.sizeof(ctype) * 8))
assert_collect_success(t, -max_val, get_int_data_type(ctypes.sizeof(ctype) * 8))
# supported unsigned int types
#
# JVM does not have unsigned types. We need to be very careful to make
# sure that there is no overflow error.
supported_unsigned_int_types = list(
set(_array_unsigned_int_typecode_ctype_mappings.keys()).intersection(
set(_array_type_mappings.keys())))
for t in supported_unsigned_int_types:
ctype = _array_unsigned_int_typecode_ctype_mappings[t]
max_val = 2 ** (ctypes.sizeof(ctype) * 8 - 1)
assert_collect_success(t, max_val, get_int_data_type(ctypes.sizeof(ctype) * 8 + 1))
# all supported types
#
# Make sure the types tested above:
# 1. are all supported types
# 2. cover all supported types
supported_types = (supported_string_types +
supported_fractional_types +
supported_signed_int_types +
supported_unsigned_int_types)
self.assertEqual(set(supported_types), set(_array_type_mappings.keys()))
# all unsupported types
#
# Keys in _array_type_mappings is a complete list of all supported types,
# and types not in _array_type_mappings are considered unsupported.
all_types = set(array.typecodes)
unsupported_types = all_types - set(supported_types)
# test unsupported types
for t in unsupported_types:
with self.assertRaises(TypeError):
_infer_schema_from_data([Row(myarray=array.array(t))])
def test_data_type_eq(self):
lt = DataTypes.BIGINT()
lt2 = pickle.loads(pickle.dumps(DataTypes.BIGINT()))
self.assertEqual(lt, lt2)
def test_decimal_type(self):
t1 = DataTypes.DECIMAL(10, 0)
t2 = DataTypes.DECIMAL(10, 2)
self.assertTrue(t2 is not t1)
self.assertNotEqual(t1, t2)
def test_datetype_equal_zero(self):
dt = DataTypes.DATE()
self.assertEqual(dt.from_sql_type(0), datetime.date(1970, 1, 1))
@unittest.skipIf(on_windows(), "Windows x64 systems only support datetimes not larger "
"than time.ctime(32536799999), so this test can't run "
"on the Windows platform")
def test_timestamp_microsecond(self):
tst = DataTypes.TIMESTAMP()
self.assertEqual(tst.to_sql_type(datetime.datetime.max) % 1000000, 999999)
@unittest.skipIf(on_windows(), "Windows x64 systems only support datetimes not larger "
"than time.ctime(32536799999), so this test can't run "
"on the Windows platform")
def test_local_zoned_timestamp_type(self):
lztst = DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE()
last_abbreviation = DataTypes.TIMESTAMP_LTZ()
self.assertEqual(lztst, last_abbreviation)
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 0000)
self.assertEqual(0, lztst.to_sql_type(ts))
import pytz
# suppose the timezone of the data is +9:00
timezone = pytz.timezone("Asia/Tokyo")
orig_epoch = LocalZonedTimestampType.EPOCH_ORDINAL
try:
# suppose the local timezone is +8:00
LocalZonedTimestampType.EPOCH_ORDINAL = 28800000000
ts_tokyo = timezone.localize(ts)
self.assertEqual(-3600000000, lztst.to_sql_type(ts_tokyo))
finally:
LocalZonedTimestampType.EPOCH_ORDINAL = orig_epoch
if sys.version_info >= (3, 6):
ts2 = lztst.from_sql_type(0)
self.assertEqual(ts.astimezone(), ts2.astimezone())
def test_zoned_timestamp_type(self):
ztst = ZonedTimestampType()
ts = datetime.datetime(1970, 1, 1, 0, 0, 0, 0000, tzinfo=UTCOffsetTimezone(1))
self.assertEqual((0, 3600), ztst.to_sql_type(ts))
ts2 = ztst.from_sql_type((0, 3600))
self.assertEqual(ts, ts2)
def test_day_time_interval_type(self):
ymt = DataTypes.INTERVAL(DataTypes.DAY(), DataTypes.SECOND())
td = datetime.timedelta(days=1, seconds=10)
self.assertEqual(86410000000, ymt.to_sql_type(td))
td2 = ymt.from_sql_type(86410000000)
self.assertEqual(td, td2)
def test_empty_row(self):
row = Row()
self.assertEqual(len(row), 0)
def test_invalid_create_row(self):
row_class = Row("c1", "c2")
self.assertRaises(ValueError, lambda: row_class(1, 2, 3))
def test_nullable(self):
t = DataType(nullable=False)
self.assertEqual(t._nullable, False)
t_nullable = t.nullable()
self.assertEqual(t_nullable._nullable, True)
def test_not_null(self):
t = DataType(nullable=True)
self.assertEqual(t._nullable, True)
t_notnull = t.not_null()
self.assertEqual(t_notnull._nullable, False)
class DataTypeVerificationTests(PyFlinkTestCase):
def test_verify_type_exception_msg(self):
self.assertRaises(
ValueError,
lambda: _create_type_verifier(
DataTypes.STRING(nullable=False), name="test_name")(None))
schema = DataTypes.ROW(
[DataTypes.FIELD('a', DataTypes.ROW([DataTypes.FIELD('b', DataTypes.INT())]))])
self.assertRaises(
TypeError,
lambda: _create_type_verifier(schema)([["data"]]))
def test_verify_type_ok_nullable(self):
obj = None
types = [DataTypes.INT(), DataTypes.FLOAT(), DataTypes.STRING(), DataTypes.ROW([])]
for data_type in types:
try:
_create_type_verifier(data_type)(obj)
except (TypeError, ValueError):
self.fail("verify_type(%s, %s, nullable=True)" % (obj, data_type))
def test_verify_type_not_nullable(self):
import array
import datetime
import decimal
schema = DataTypes.ROW([
DataTypes.FIELD('s', DataTypes.STRING(nullable=False)),
DataTypes.FIELD('i', DataTypes.INT(True))])
class MyObj:
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
# obj, data_type
success_spec = [
# String
("", DataTypes.STRING()),
(u"", DataTypes.STRING()),
# UDT
(ExamplePoint(1.0, 2.0), ExamplePointUDT()),
# Boolean
(True, DataTypes.BOOLEAN()),
# TinyInt
(-(2 ** 7), DataTypes.TINYINT()),
(2 ** 7 - 1, DataTypes.TINYINT()),
# SmallInt
(-(2 ** 15), DataTypes.SMALLINT()),
(2 ** 15 - 1, DataTypes.SMALLINT()),
# Int
(-(2 ** 31), DataTypes.INT()),
(2 ** 31 - 1, DataTypes.INT()),
# BigInt
(2 ** 64, DataTypes.BIGINT()),
# Float & Double
(1.0, DataTypes.FLOAT()),
(1.0, DataTypes.DOUBLE()),
# Decimal
(decimal.Decimal("1.0"), DataTypes.DECIMAL(10, 0)),
# Binary
(bytearray([1]), DataTypes.BINARY(1)),
# Date/Time/Timestamp
(datetime.date(2000, 1, 2), DataTypes.DATE()),
(datetime.datetime(2000, 1, 2, 3, 4), DataTypes.DATE()),
(datetime.time(1, 1, 2), DataTypes.TIME()),
(datetime.datetime(2000, 1, 2, 3, 4), DataTypes.TIMESTAMP()),
# Array
([], DataTypes.ARRAY(DataTypes.INT())),
(["1", None], DataTypes.ARRAY(DataTypes.STRING(nullable=True))),
([1, 2], DataTypes.ARRAY(DataTypes.INT())),
((1, 2), DataTypes.ARRAY(DataTypes.INT())),
(array.array('h', [1, 2]), DataTypes.ARRAY(DataTypes.INT())),
# Map
({}, DataTypes.MAP(DataTypes.STRING(), DataTypes.INT())),
({"a": 1}, DataTypes.MAP(DataTypes.STRING(), DataTypes.INT())),
({"a": None}, DataTypes.MAP(DataTypes.STRING(nullable=False), DataTypes.INT(True))),
# Struct
({"s": "a", "i": 1}, schema),
({"s": "a", "i": None}, schema),
({"s": "a"}, schema),
({"s": "a", "f": 1.0}, schema),
(Row(s="a", i=1), schema),
(Row(s="a", i=None), schema),
(Row(s="a", i=1, f=1.0), schema),
(["a", 1], schema),
(["a", None], schema),
(("a", 1), schema),
(MyObj(s="a", i=1), schema),
(MyObj(s="a", i=None), schema),
(MyObj(s="a"), schema),
]
# obj, data_type, exception class
failure_spec = [
# Char/VarChar (match anything but None)
(None, DataTypes.VARCHAR(1), ValueError),
(None, DataTypes.CHAR(1), ValueError),
# VarChar (length exceeds maximum length)
("abc", DataTypes.VARCHAR(1), ValueError),
# Char (length exceeds length)
("abc", DataTypes.CHAR(1), ValueError),
# UDT
(ExamplePoint(1.0, 2.0), PythonOnlyUDT(), ValueError),
# Boolean
(1, DataTypes.BOOLEAN(), TypeError),
("True", DataTypes.BOOLEAN(), TypeError),
([1], DataTypes.BOOLEAN(), TypeError),
# TinyInt
(-(2 ** 7) - 1, DataTypes.TINYINT(), ValueError),
(2 ** 7, DataTypes.TINYINT(), ValueError),
("1", DataTypes.TINYINT(), TypeError),
(1.0, DataTypes.TINYINT(), TypeError),
# SmallInt
(-(2 ** 15) - 1, DataTypes.SMALLINT(), ValueError),
(2 ** 15, DataTypes.SMALLINT(), ValueError),
# Int
(-(2 ** 31) - 1, DataTypes.INT(), ValueError),
(2 ** 31, DataTypes.INT(), ValueError),
# Float & Double
(1, DataTypes.FLOAT(), TypeError),
(1, DataTypes.DOUBLE(), TypeError),
# Decimal
(1.0, DataTypes.DECIMAL(10, 0), TypeError),
(1, DataTypes.DECIMAL(10, 0), TypeError),
("1.0", DataTypes.DECIMAL(10, 0), TypeError),
# Binary
(1, DataTypes.BINARY(1), TypeError),
# VarBinary (length exceeds maximum length)
(bytearray([1, 2]), DataTypes.VARBINARY(1), ValueError),
# Char (length exceeds length)
(bytearray([1, 2]), DataTypes.BINARY(1), ValueError),
# Date/Time/Timestamp
("2000-01-02", DataTypes.DATE(), TypeError),
("10:01:02", DataTypes.TIME(), TypeError),
(946811040, DataTypes.TIMESTAMP(), TypeError),
# Array
(["1", None], DataTypes.ARRAY(DataTypes.VARCHAR(1, nullable=False)), ValueError),
([1, "2"], DataTypes.ARRAY(DataTypes.INT()), TypeError),
# Map
({"a": 1}, DataTypes.MAP(DataTypes.INT(), DataTypes.INT()), TypeError),
({"a": "1"}, DataTypes.MAP(DataTypes.VARCHAR(1), DataTypes.INT()), TypeError),
({"a": None}, DataTypes.MAP(DataTypes.VARCHAR(1), DataTypes.INT(False)), ValueError),
# Struct
({"s": "a", "i": "1"}, schema, TypeError),
(Row(s="a"), schema, ValueError), # Row can't have missing field
(Row(s="a", i="1"), schema, TypeError),
(["a"], schema, ValueError),
(["a", "1"], schema, TypeError),
(MyObj(s="a", i="1"), schema, TypeError),
(MyObj(s=None, i="1"), schema, ValueError),
]
# Check success cases
for obj, data_type in success_spec:
try:
_create_type_verifier(data_type.not_null())(obj)
except (TypeError, ValueError):
self.fail("verify_type(%s, %s, nullable=False)" % (obj, data_type))
# Check failure cases
for obj, data_type, exp in failure_spec:
msg = "verify_type(%s, %s, nullable=False) == %s" % (obj, data_type, exp)
with self.assertRaises(exp, msg=msg):
_create_type_verifier(data_type.not_null())(obj)
class DataTypeConvertTests(PyFlinkTestCase):
def test_basic_type(self):
test_types = [DataTypes.STRING(),
DataTypes.BOOLEAN(),
DataTypes.BYTES(),
DataTypes.TINYINT(),
DataTypes.SMALLINT(),
DataTypes.INT(),
DataTypes.BIGINT(),
DataTypes.FLOAT(),
DataTypes.DOUBLE(),
DataTypes.DATE(),
DataTypes.TIME(),
DataTypes.TIMESTAMP(3)]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_atomic_type_with_data_type_with_parameters(self):
gateway = get_gateway()
JDataTypes = gateway.jvm.DataTypes
java_types = [JDataTypes.TIME(3).notNull(),
JDataTypes.TIMESTAMP(3).notNull(),
JDataTypes.VARBINARY(100).notNull(),
JDataTypes.BINARY(2).notNull(),
JDataTypes.VARCHAR(30).notNull(),
JDataTypes.CHAR(50).notNull(),
JDataTypes.DECIMAL(20, 10).notNull()]
converted_python_types = [_from_java_data_type(item) for item in java_types]
expected = [DataTypes.TIME(3, False),
DataTypes.TIMESTAMP(3).not_null(),
DataTypes.VARBINARY(100, False),
DataTypes.BINARY(2, False),
DataTypes.VARCHAR(30, False),
DataTypes.CHAR(50, False),
DataTypes.DECIMAL(20, 10, False)]
self.assertEqual(converted_python_types, expected)
def test_array_type(self):
# nullable/not_null flag will be lost during the conversion.
test_types = [DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.ARRAY(DataTypes.BIGINT()),
DataTypes.ARRAY(DataTypes.STRING()),
DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT())),
DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.STRING()))]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_multiset_type(self):
test_types = [DataTypes.MULTISET(DataTypes.BIGINT()),
DataTypes.MULTISET(DataTypes.STRING()),
DataTypes.MULTISET(DataTypes.MULTISET(DataTypes.BIGINT())),
DataTypes.MULTISET(DataTypes.MULTISET(DataTypes.STRING()))]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_map_type(self):
test_types = [DataTypes.MAP(DataTypes.BIGINT(), DataTypes.BIGINT()),
DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()),
DataTypes.MAP(DataTypes.STRING(),
DataTypes.MAP(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.MAP(DataTypes.STRING(),
DataTypes.MAP(DataTypes.STRING(), DataTypes.STRING()))]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_row_type(self):
test_types = [DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b",
DataTypes.ROW(
[DataTypes.FIELD("c",
DataTypes.STRING())]))])]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_list_view_type(self):
test_types = [DataTypes.LIST_VIEW(DataTypes.BIGINT()),
DataTypes.LIST_VIEW(DataTypes.STRING())]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
def test_map_view_type(self):
test_types = [DataTypes.MAP_VIEW(DataTypes.STRING(), DataTypes.BIGINT()),
DataTypes.MAP_VIEW(DataTypes.INT(), DataTypes.STRING())]
java_types = [_to_java_data_type(item) for item in test_types]
converted_python_types = [_from_java_data_type(item) for item in java_types]
self.assertEqual(test_types, converted_python_types)
class DataSerializerTests(PyFlinkTestCase):
def test_java_pickle_deserializer(self):
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
serializer = PickleSerializer()
data = [(1, 2), (3, 4), (5, 6), (7, 8)]
try:
serializer.serialize(data, temp_file)
finally:
temp_file.close()
gateway = get_gateway()
result = [tuple(int_pair) for int_pair in
list(gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, False))]
self.assertEqual(result, [(1, 2), (3, 4), (5, 6), (7, 8)])
def test_java_batch_deserializer(self):
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
serializer = BatchedSerializer(PickleSerializer(), 2)
data = [(1, 2), (3, 4), (5, 6), (7, 8)]
try:
serializer.serialize(data, temp_file)
finally:
temp_file.close()
gateway = get_gateway()
result = [tuple(int_pair) for int_pair in
list(gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name, True))]
self.assertEqual(result, [(1, 2), (3, 4), (5, 6), (7, 8)])
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 39,663 | 40.145228 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_environment_completeness.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
from pyflink.table import TableEnvironment
class EnvironmentAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`TableEnvironment` is consistent with
Java `org.apache.flink.table.api.TableEnvironment`.
"""
@classmethod
def python_class(cls):
return TableEnvironment
@classmethod
def java_class(cls):
return "org.apache.flink.table.api.TableEnvironment"
@classmethod
def excluded_methods(cls):
# getCompletionHints has been deprecated. It will be removed in the next release.
return {
'getCompletionHints',
'fromValues',
# See FLINK-25986
'loadPlan',
'compilePlanSql',
'executePlan',
'explainPlan'}
@classmethod
def java_method_name(cls, python_method_name):
"""
Because 'from' is a Python keyword, the Python API uses 'from_path'
to correspond to 'from' in the Java API.
:param python_method_name:
:return:
"""
py_func_to_java_method_dict = {'from_path': 'from',
"from_descriptor": "from",
"create_java_function": "create_function"}
return py_func_to_java_method_dict.get(python_method_name, python_method_name)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,685 | 36.305556 | 90 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_set_operation.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table import TableEnvironment, EnvironmentSettings
from pyflink.testing.test_case_utils import PyFlinkTestCase
class StreamTableSetOperationTests(PyFlinkTestCase):
data1 = [(1, "Hi", "Hello")]
data2 = [(3, "Hello", "Hello")]
schema = ["a", "b", "c"]
def setUp(self) -> None:
self.t_env = TableEnvironment.create(EnvironmentSettings.in_batch_mode())
def test_minus(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.minus(t2)
self.assertEqual('MINUS', result._j_table.getQueryOperation().getType().toString())
self.assertFalse(result._j_table.getQueryOperation().isAll())
def test_minus_all(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.minus_all(t2)
self.assertEqual('MINUS', result._j_table.getQueryOperation().getType().toString())
self.assertTrue(result._j_table.getQueryOperation().isAll())
def test_union(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.union(t2)
self.assertEqual('UNION', result._j_table.getQueryOperation().getType().toString())
self.assertFalse(result._j_table.getQueryOperation().isAll())
def test_union_all(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.union_all(t2)
self.assertEqual('UNION', result._j_table.getQueryOperation().getType().toString())
self.assertTrue(result._j_table.getQueryOperation().isAll())
def test_intersect(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.intersect(t2)
self.assertEqual('INTERSECT', result._j_table.getQueryOperation().getType().toString())
self.assertFalse(result._j_table.getQueryOperation().isAll())
def test_intersect_all(self):
t_env = self.t_env
t1 = t_env.from_elements(self.data1, self.schema)
t2 = t_env.from_elements(self.data2, self.schema)
result = t1.intersect_all(t2)
self.assertEqual('INTERSECT', result._j_table.getQueryOperation().getType().toString())
self.assertTrue(result._j_table.getQueryOperation().isAll())
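# Summary of what the assertions above verify (illustrative only): each set
# operation on Table produces a query operation whose type and "all" flag
# encode the SQL semantics, e.g.
#
#     t1.union(t2)         -> type UNION,     isAll() is False
#     t1.union_all(t2)     -> type UNION,     isAll() is True
#     t1.minus(t2)         -> type MINUS,     isAll() is False
#     t1.intersect_all(t2) -> type INTERSECT, isAll() is True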
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 3,843 | 39.041667 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_udaf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
import datetime
import uuid
from decimal import Decimal
import pandas as pd
from pandas.testing import assert_frame_equal
from pyflink.common import Row, RowKind
from pyflink.fn_execution.state_impl import RemovableConcatIterator
from pyflink.table import DataTypes
from pyflink.table.data_view import ListView, MapView
from pyflink.table.expressions import col, call, lit, row_interval
from pyflink.table.udf import AggregateFunction, udaf
from pyflink.table.window import Tumble, Slide, Session
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
def generate_random_table_name():
return "Table{0}".format(str(uuid.uuid1()).replace("-", "_"))
class CountAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [0]
def accumulate(self, accumulator, *args):
accumulator[0] = accumulator[0] + 1
def retract(self, accumulator, *args):
accumulator[0] = accumulator[0] - 1
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] = accumulator[0] + other_acc[0]
def get_accumulator_type(self):
return 'ARRAY<BIGINT>'
def get_result_type(self):
return 'BIGINT'
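# Usage sketch (for readability only; it mirrors calls made later in this
# file): an AggregateFunction like the ones above is either wrapped with
# ``udaf`` and applied directly in the Table API, or registered on the
# TableEnvironment so that it can be referenced by name:
#
#     my_count = udaf(CountAggregateFunction())   # types come from get_*_type()
#     t.group_by(t.c).select(my_count(t.a), t.c)
#
#     # or, by name:
#     # t_env.create_temporary_system_function("my_count", CountAggregateFunction())
#     # t.group_by(t.c).select(call("my_count", t.a), t.c)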
class SumAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[0]
def create_accumulator(self):
return [0]
def accumulate(self, accumulator, *args):
accumulator[0] = accumulator[0] + args[0]
def retract(self, accumulator, *args):
accumulator[0] = accumulator[0] - args[0]
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] = accumulator[0] + other_acc[0]
def get_accumulator_type(self):
return 'ARRAY<BIGINT>'
def get_result_type(self):
return 'BIGINT'
class ConcatAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
str_list = [i for i in accumulator[0]]
str_list.sort()
return accumulator[1].join(str_list)
def create_accumulator(self):
return Row([], '')
def accumulate(self, accumulator, *args):
if args[0] is not None:
accumulator[1] = args[1]
accumulator[0].append(args[0])
def retract(self, accumulator, *args):
if args[0] is not None:
accumulator[0].remove(args[0])
def get_accumulator_type(self):
return 'ROW<f0 STRING, f1 BIGINT>'
def get_result_type(self):
return 'STRING'
class ListViewConcatAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[1].join(accumulator[0])
def create_accumulator(self):
return Row(ListView(), '')
def accumulate(self, accumulator, *args):
accumulator[1] = args[1]
accumulator[0].add(args[0])
def retract(self, accumulator, *args):
raise NotImplementedError
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.LIST_VIEW(DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.STRING()
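# Note on ListView (illustrative): declaring the first accumulator field as
# DataTypes.LIST_VIEW(...) allows the runtime to back it with Flink state
# rather than materializing the whole list inside the accumulator row. The
# function above only appends via ListView.add() in accumulate() and iterates
# the view when building the final string in get_value().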
class CountDistinctAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return accumulator[1]
def create_accumulator(self):
return Row(MapView(), 0)
def accumulate(self, accumulator, *args):
input_str = args[0]
if accumulator[0].is_empty() or input_str not in accumulator[0] \
or accumulator[0][input_str] is None:
accumulator[0][input_str] = 1
accumulator[1] += 1
else:
accumulator[0][input_str] += 1
if input_str == "clear":
accumulator[0].clear()
accumulator[1] = 0
def retract(self, accumulator, *args):
input_str = args[0]
if accumulator[0].is_empty() or input_str not in accumulator[0]:
return
accumulator[0].put_all({input_str: accumulator[0][input_str] - 1})
if accumulator[0][input_str] <= 0:
accumulator[1] -= 1
accumulator[0][input_str] = None
def get_accumulator_type(self):
return 'ROW<f0 MAP<STRING, STRING>, f1 BIGINT>'
def get_result_type(self):
return 'BIGINT'
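# How the distinct count above works (summary, not executed): accumulator[0]
# is a MapView from input value to its occurrence count and accumulator[1] is
# the number of distinct keys currently present. On full retraction the entry
# is set to None instead of being removed, and an input of "clear" wipes the
# whole view via MapView.clear().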
class CustomIterateAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
# test iterate keys
key_set = [i for i in accumulator[0]]
key_set.sort()
# test iterate values
value_set = [str(i) for i in accumulator[0].values()]
value_set.sort()
item_set = {}
# test iterate items
for key, value in accumulator[0].items():
item_set[key] = value
ordered_item_set = collections.OrderedDict()
for key in key_set:
ordered_item_set[key] = str(item_set[key])
try:
# test auto clear the cached iterators
next(iter(accumulator[0].items()))
except StopIteration:
pass
return Row(",".join(key_set),
','.join(value_set),
",".join([":".join(item) for item in ordered_item_set.items()]),
accumulator[1])
def create_accumulator(self):
return Row(MapView(), 0)
def accumulate(self, accumulator, *args):
input_str = args[0]
if input_str not in accumulator[0]:
accumulator[0][input_str] = 1
accumulator[1] += 1
else:
accumulator[0][input_str] += 1
def retract(self, accumulator, *args):
input_str = args[0]
if input_str not in accumulator[0]:
return
accumulator[0][input_str] -= 1
if accumulator[0][input_str] == 0:
# test removable iterator
key_iter = iter(accumulator[0].keys()) # type: RemovableConcatIterator
while True:
try:
key = next(key_iter)
if key == input_str:
key_iter.remove()
except StopIteration:
break
accumulator[1] -= 1
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.MAP_VIEW(DataTypes.STRING(), DataTypes.BIGINT())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.STRING()),
DataTypes.FIELD("f1", DataTypes.STRING()),
DataTypes.FIELD("f2", DataTypes.STRING()),
DataTypes.FIELD("f3", DataTypes.BIGINT())])
class StreamTableAggregateTests(PyFlinkStreamTableTestCase):
@classmethod
def setUpClass(cls):
super(StreamTableAggregateTests, cls).setUpClass()
cls.t_env.create_temporary_system_function("my_count", CountAggregateFunction())
cls.t_env.create_temporary_function("my_sum", SumAggregateFunction())
cls.t_env.create_temporary_system_function("concat", ConcatAggregateFunction())
cls.t_env.create_temporary_system_function("my_count_distinct",
CountDistinctAggregateFunction())
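    # The registrations above make the Python UDAFs callable by name, both via
    # call("my_count", ...) in the Table API and as my_sum(...) / my_count(...)
    # in the SQL statements used by the JSON-plan tests further down.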
def test_double_aggregate(self):
# trigger the finish bundle more frequently to ensure testing the communication
# between RemoteKeyedStateBackend and the StateGrpcService.
self.t_env.get_config().set(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().set(
"python.state.cache-size", "1")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi2'),
(2, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c) \
.select(call("my_count", t.a).alias("a"),
call("my_sum", t.a).alias("b"), t.c) \
.select(call("my_count", col("a")).alias("a"),
call("my_sum", col("b")).alias("b"),
call("sum0", col("b")).alias("c"),
call("sum0", col("b").cast(DataTypes.DOUBLE())).alias("d"))
assert_frame_equal(result.to_pandas(),
pd.DataFrame([[3, 12, 12, 12.0]], columns=['a', 'b', 'c', 'd']))
def test_mixed_with_built_in_functions_with_retract(self):
self.t_env.get_config().set("parallelism.default", "1")
t = self.t_env.from_elements(
[(1, 'Hi_', 1),
(1, 'Hi', 2),
(2, 'Hi_', 3),
(2, 'Hi', 4),
(3, None, None),
(3, None, None),
(4, 'hello2_', 7),
(4, 'hello2', 8),
(5, 'hello_', 9),
(5, 'hello', 10)], ['a', 'b', 'c'])
self.t_env.create_temporary_view(
"test_mixed_with_built_in_functions_with_retract_source", t)
table_with_retract_message = self.t_env.sql_query(
"select a, LAST_VALUE(b) as b, LAST_VALUE(c) as c from "
"test_mixed_with_built_in_functions_with_retract_source group by a")
self.t_env.create_temporary_view(
"test_mixed_with_built_in_functions_with_retract_retract_table",
table_with_retract_message)
result_table = self.t_env.sql_query(
"select concat(b, ',') as a, "
"FIRST_VALUE(b) as b, "
"LAST_VALUE(b) as c, "
"COUNT(c) as d, "
"COUNT(1) as e, "
"LISTAGG(b) as f,"
"LISTAGG(b, '|') as g,"
"MAX(c) as h,"
"MAX(cast(c as float) + 1) as i,"
"MIN(c) as j,"
"MIN(cast(c as decimal) + 1) as k,"
"SUM(c) as l,"
"SUM(cast(c as float) + 1) as m,"
"AVG(c) as n,"
"AVG(cast(c as double) + 1) as o,"
"STDDEV_POP(cast(c as float)),"
"STDDEV_SAMP(cast(c as float)),"
"VAR_POP(cast(c as float)),"
"VAR_SAMP(cast(c as float))"
" from test_mixed_with_built_in_functions_with_retract_retract_table")
result = [i for i in result_table.execute().collect()]
expected = Row('Hi,Hi,hello,hello2', 'Hi', 'hello', 4, 5, 'Hi,Hi,hello2,hello',
'Hi|Hi|hello2|hello', 10, 11.0, 2, Decimal(3.0), 24, 28.0, 6, 7.0,
3.1622777, 3.6514838, 10.0, 13.333333)
expected.set_row_kind(RowKind.UPDATE_AFTER)
self.assertEqual(result[len(result) - 1], expected)
def test_mixed_with_built_in_functions_without_retract(self):
self.t_env.get_config().set("parallelism.default", "1")
t = self.t_env.from_elements(
[('Hi', 2),
('Hi', 4),
(None, None),
('hello2', 8),
('hello', 10)], ['b', 'c'])
self.t_env.create_temporary_view(
"test_mixed_with_built_in_functions_without_retract_source", t)
result_table = self.t_env.sql_query(
"select concat(b, ',') as a, "
"FIRST_VALUE(b) as b, "
"LAST_VALUE(b) as c, "
"COUNT(c) as d, "
"COUNT(1) as e, "
"LISTAGG(b) as f,"
"LISTAGG(b, '|') as g,"
"MAX(c) as h,"
"MAX(cast(c as float) + 1) as i,"
"MIN(c) as j,"
"MIN(cast(c as decimal) + 1) as k,"
"SUM(c) as l,"
"SUM(cast(c as float) + 1) as m "
"from test_mixed_with_built_in_functions_without_retract_source")
result = [i for i in result_table.execute().collect()]
expected = Row('Hi,Hi,hello,hello2', 'Hi', 'hello', 4, 5, 'Hi,Hi,hello2,hello',
'Hi|Hi|hello2|hello', 10, 11.0, 2, Decimal(3.0), 24, 28.0)
expected.set_row_kind(RowKind.UPDATE_AFTER)
self.assertEqual(result[len(result) - 1], expected)
def test_using_decorator(self):
my_count = udaf(CountAggregateFunction(),
accumulator_type=DataTypes.ARRAY(DataTypes.INT()),
result_type=DataTypes.INT())
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c) \
.select(my_count(t.a).alias("a"), t.c.alias("b"))
plan = result.explain()
result_type = result.get_schema().get_field_data_type(0)
self.assertTrue(plan.find("PythonGroupAggregate(groupBy=[c], ") >= 0)
self.assertEqual(result_type, DataTypes.INT())
def test_list_view(self):
my_concat = udaf(ListViewConcatAggregateFunction())
self.t_env.get_config().set(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().set(
"python.state.cache-size", "2")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi'),
(2, 'Hi', 'Hello'),
(1, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(3, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(2, 'Hi3', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(my_concat(t.b, ',').alias("a"), t.c)
assert_frame_equal(result.to_pandas().sort_values('c').reset_index(drop=True),
pd.DataFrame([["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
["Hi,Hi2,Hi,Hi3,Hi3", "hi"]], columns=['a', 'c']))
def test_map_view(self):
my_count = udaf(CountDistinctAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().set(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().set(
"python.state.cache-size", "1")
self.t_env.get_config().set(
"python.map-state.read-cache-size", "1")
self.t_env.get_config().set(
"python.map-state.write-cache-size", "1")
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("test_map_view_source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from test_map_view_source group by a")
result = table_with_retract_message.group_by(t.c).select(my_count(t.b).alias("a"), t.c)
assert_frame_equal(result.to_pandas().sort_values('c').reset_index(drop=True),
pd.DataFrame([[2, "hello"],
[3, "hi"]], columns=['a', 'c']))
def test_data_view_clear(self):
my_count = udaf(CountDistinctAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().set(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().set(
"python.state.cache-size", "1")
t = self.t_env.from_elements(
[(2, 'hello', 'hello'),
(4, 'clear', 'hello'),
(6, 'hello2', 'hello'),
(8, 'hello', 'hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(my_count(t.b).alias("a"), t.c)
assert_frame_equal(result.to_pandas(),
pd.DataFrame([[2, "hello"]], columns=['a', 'c']))
def test_map_view_iterate(self):
test_iterate = udaf(CustomIterateAggregateFunction())
self.t_env.get_config().set_idle_state_retention(datetime.timedelta(days=1))
self.t_env.get_config().set(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().set(
"python.state.cache-size", "2")
self.t_env.get_config().set(
"python.map-state.read-cache-size", "2")
self.t_env.get_config().set(
"python.map-state.write-cache-size", "2")
self.t_env.get_config().set(
"python.map-state.iterate-response-batch-size", "2")
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("test_map_view_iterate_source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from test_map_view_iterate_source "
"group by a")
result = table_with_retract_message.group_by(t.c) \
.select(test_iterate(t.b).alias("a"), t.c) \
.select(col("a").get(0).alias("a"),
col("a").get(1).alias("b"),
col("a").get(2).alias("c"),
col("a").get(3).alias("d"),
t.c.alias("e"))
assert_frame_equal(
result.to_pandas().sort_values('c').reset_index(drop=True),
pd.DataFrame([
["Hi,Hi2,Hi3", "1,2,3", "Hi:3,Hi2:2,Hi3:1", 3, "hi"],
["hello,hello2", "1,3", 'hello:3,hello2:1', 2, "hello"]],
columns=['a', 'b', 'c', 'd', 'e']))
def test_distinct_and_filter(self):
t = self.t_env.from_elements(
[(1, 'Hi_', 'hi'),
(1, 'Hi', 'hi'),
(2, 'hello', 'hello'),
(3, 'Hi_', 'hi'),
(3, 'Hi', 'hi'),
(4, 'hello', 'hello'),
(5, 'Hi2_', 'hi'),
(5, 'Hi2', 'hi'),
(6, 'hello2', 'hello'),
(7, 'Hi', 'hi'),
(8, 'hello', 'hello'),
(9, 'Hi2', 'hi'),
(13, 'Hi3', 'hi')], ['a', 'b', 'c'])
self.t_env.create_temporary_view("source", t)
table_with_retract_message = self.t_env.sql_query(
"select LAST_VALUE(b) as b, LAST_VALUE(c) as c from source group by a")
self.t_env.create_temporary_view("retract_table", table_with_retract_message)
result = self.t_env.sql_query(
"select concat(distinct b, '.') as a, "
"concat(distinct b, ',') filter (where c = 'hi') as b, "
"concat(distinct b, ',') filter (where c = 'hello') as c, "
"c as d "
"from retract_table group by c")
assert_frame_equal(result.to_pandas().sort_values(by='a').reset_index(drop=True),
pd.DataFrame([["Hi.Hi2.Hi3", "Hi,Hi2,Hi3", "", "hi"],
["hello.hello2", "", "hello,hello2", "hello"]],
columns=['a', 'b', 'c', 'd']))
def test_clean_state(self):
self.t_env.get_config().set("parallelism.default", "1")
self.t_env.get_config().set(
"python.fn-execution.bundle.size", "1")
self.t_env.get_config().set(
"python.state.cache-size", "0")
self.t_env.get_config().set(
"table.exec.state.ttl", "2ms")
source_table = generate_random_table_name()
self.t_env.execute_sql(f"""
CREATE TABLE {source_table}(
a BIGINT
) WITH (
'connector' = 'datagen',
'number-of-rows' = '5',
'rows-per-second' = '1'
)
""")
sink_table = generate_random_table_name()
self.t_env.execute_sql(f"""
CREATE TABLE {sink_table}(
a BIGINT
) WITH ('connector' = 'blackhole')
""")
t = self.t_env.from_path(source_table)
t.select(call("my_count", t.a).alias("a")).execute_insert(sink_table).wait()
def test_tumbling_group_window_over_time(self):
# create source file path
tmp_dir = self.tempdir
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:30:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
]
source_path = tmp_dir + '/test_tumbling_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table_name = generate_random_table_name()
source_table = f"""
create table {source_table_name}(
a TINYINT,
b SMALLINT,
c INT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table)
t = self.t_env.from_path(source_table_name)
from pyflink.testing import source_sink_utils
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, b TIMESTAMP(3), c TIMESTAMP(3), d BIGINT, e BIGINT)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Tumble.over(lit(1).hours).on(t.rowtime).alias("w")) \
.group_by(t.a, col("w")) \
.select(t.a,
col("w").start,
col("w").end,
t.c.count.alias("c"),
call("my_count_distinct", t.c).alias("d")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[2, 2018-03-11T03:00, 2018-03-11T04:00, 2, 1]",
"+I[3, 2018-03-11T03:00, 2018-03-11T04:00, 1, 1]",
"+I[1, 2018-03-11T03:00, 2018-03-11T04:00, 2, 2]",
"+I[1, 2018-03-11T04:00, 2018-03-11T05:00, 1, 1]"])
def test_tumbling_group_window_over_count(self):
self.t_env.get_config().set("parallelism.default", "1")
# create source file path
tmp_dir = self.tempdir
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00',
'3,3,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_tumbling_group_window_over_count.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table_name = generate_random_table_name()
source_table = f"""
create table {source_table_name}(
a TINYINT,
b SMALLINT,
c SMALLINT,
protime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table)
t = self.t_env.from_path(source_table_name)
from pyflink.testing import source_sink_utils
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, d BIGINT)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Tumble.over(row_interval(2)).on(t.protime).alias("w")) \
.group_by(t.a, col("w")) \
.select(t.a, call("my_sum", t.c).alias("b")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 5]", "+I[2, 4]", "+I[3, 5]"])
def test_sliding_group_window_over_time(self):
# create source file path
tmp_dir = self.tempdir
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:30:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
]
source_path = tmp_dir + '/test_sliding_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table_name = generate_random_table_name()
source_table = f"""
create table {source_table_name}(
a TINYINT,
b SMALLINT,
c INT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table)
t = self.t_env.from_path(source_table_name)
from pyflink.testing import source_sink_utils
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, b TIMESTAMP(3), c TIMESTAMP(3), d BIGINT)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Slide.over(lit(1).hours)
.every(lit(30).minutes)
.on(t.rowtime)
.alias("w")) \
.group_by(t.a, col("w")) \
.select(t.a, col("w").start, col("w").end, call("my_sum", t.c).alias("c")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 2018-03-11T02:30, 2018-03-11T03:30, 2]",
"+I[2, 2018-03-11T02:30, 2018-03-11T03:30, 1]",
"+I[3, 2018-03-11T02:30, 2018-03-11T03:30, 2]",
"+I[1, 2018-03-11T03:00, 2018-03-11T04:00, 5]",
"+I[3, 2018-03-11T03:00, 2018-03-11T04:00, 2]",
"+I[2, 2018-03-11T03:00, 2018-03-11T04:00, 2]",
"+I[2, 2018-03-11T03:30, 2018-03-11T04:30, 1]",
"+I[1, 2018-03-11T03:30, 2018-03-11T04:30, 11]",
"+I[1, 2018-03-11T04:00, 2018-03-11T05:00, 8]"])
def test_sliding_group_window_over_count(self):
self.t_env.get_config().set("parallelism.default", "1")
# create source file path
tmp_dir = self.tempdir
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00',
'3,3,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_sliding_group_window_over_count.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
self.t_env.create_temporary_system_function("my_sum", SumAggregateFunction())
source_table_name = generate_random_table_name()
source_table = f"""
create table {source_table_name}(
a TINYINT,
b SMALLINT,
c SMALLINT,
protime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table)
t = self.t_env.from_path(source_table_name)
from pyflink.testing import source_sink_utils
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, d BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Slide.over(row_interval(2)).every(row_interval(1)).on(t.protime).alias("w")) \
.group_by(t.a, col("w")) \
.select(t.a, call("my_sum", t.c).alias("b")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 5]", "+I[1, 11]", "+I[2, 4]", "+I[3, 5]"])
def test_session_group_window_over_time(self):
# create source file path
tmp_dir = self.tempdir
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_session_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table_name = generate_random_table_name()
source_table = f"""
create table {source_table_name}(
a TINYINT,
b SMALLINT,
c SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table)
t = self.t_env.from_path(source_table_name)
from pyflink.testing import source_sink_utils
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, b TIMESTAMP(3), c TIMESTAMP(3), d BIGINT)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Session.with_gap(lit(30).minutes).on(t.rowtime).alias("w")) \
.group_by(t.a, t.b, col("w")) \
.select(t.a, col("w").start, col("w").end, call("my_count", t.c).alias("c")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[3, 2018-03-11T03:10, 2018-03-11T03:40, 1]",
"+I[2, 2018-03-11T03:10, 2018-03-11T04:00, 2]",
"+I[1, 2018-03-11T03:10, 2018-03-11T04:10, 2]",
"+I[1, 2018-03-11T04:20, 2018-03-11T04:50, 1]"])
def test_execute_group_aggregate_from_json_plan(self):
# create source file path
tmp_dir = self.tempdir
data = ['1,1', '3,2', '1,3']
source_path = tmp_dir + '/test_execute_group_aggregate_from_json_plan.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table_name = generate_random_table_name()
source_table = f"""
CREATE TABLE {source_table_name} (
a BIGINT,
b BIGINT
) WITH (
'connector' = 'filesystem',
'path' = '{source_path}',
'format' = 'csv'
)
"""
self.t_env.execute_sql(source_table)
sink_table = generate_random_table_name()
self.t_env.execute_sql(f"""
CREATE TABLE {sink_table} (
a BIGINT,
b BIGINT
) WITH (
'connector' = 'blackhole'
)
""")
json_plan = self.t_env._j_tenv.compilePlanSql(f"""
INSERT INTO {sink_table}
SELECT a, my_sum(b) FROM {source_table_name}
GROUP BY a
""")
from py4j.java_gateway import get_method
get_method(json_plan.execute(), "await")()
def test_execute_group_window_aggregate_from_json_plan(self):
# create source file path
tmp_dir = self.tempdir
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_execute_group_window_aggregate_from_json_plan.csv'
sink_path = tmp_dir + '/test_execute_group_window_aggregate_from_json_plan'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table_name = generate_random_table_name()
source_table = f"""
CREATE TABLE {source_table_name} (
a TINYINT,
b SMALLINT,
c SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) WITH (
'connector' = 'filesystem',
'path' = '{source_path}',
'format' = 'csv'
)
"""
self.t_env.execute_sql(source_table)
sink_table = generate_random_table_name()
self.t_env.execute_sql(f"""
CREATE TABLE {sink_table} (
a BIGINT,
w_start TIMESTAMP(3),
w_end TIMESTAMP(3),
b BIGINT
) WITH (
'connector' = 'filesystem',
'path' = '{sink_path}',
'format' = 'csv'
)
""")
json_plan = self.t_env._j_tenv.compilePlanSql(f"""
INSERT INTO {sink_table}
SELECT a, SESSION_START(rowtime, INTERVAL '30' MINUTE),
SESSION_END(rowtime, INTERVAL '30' MINUTE),
my_count(c) FROM {source_table_name}
GROUP BY a, b, SESSION(rowtime, INTERVAL '30' MINUTE)
""")
from py4j.java_gateway import get_method
get_method(json_plan.execute(), "await")()
import glob
lines = [line.strip() for file in glob.glob(sink_path + '/*') for line in open(file, 'r')]
lines.sort()
self.assertEqual(lines,
['1,"2018-03-11 03:10:00","2018-03-11 04:10:00",2',
'1,"2018-03-11 04:20:00","2018-03-11 04:50:00",1',
'2,"2018-03-11 03:10:00","2018-03-11 04:00:00",2',
'3,"2018-03-11 03:10:00","2018-03-11 03:40:00",1'])
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 37,444 | 38.835106 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_pandas_udf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import pytz
from pyflink.common import Row
from pyflink.table import DataTypes
from pyflink.table.tests.test_udf import SubtractOne
from pyflink.table.udf import udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase, \
PyFlinkStreamTableTestCase, PyFlinkTestCase
class PandasUDFTests(PyFlinkTestCase):
def test_non_exist_func_type(self):
with self.assertRaisesRegex(ValueError,
'The func_type must be one of \'general, pandas\''):
udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), func_type="non-exist")
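# Declaration sketch (illustrative): func_type="pandas" switches a UDF from
# row-at-a-time to vectorized execution, so the wrapped callable receives
# pandas.Series batches (or a pandas.DataFrame for ROW-typed arguments) and
# must return a result of the same length, e.g.
#
#     add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT(),
#                   func_type="pandas")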
class PandasUDFITTests(object):
def test_basic_functionality(self):
# pandas UDF
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), func_type="pandas")
# general Python UDF
subtract_one = udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT())
sink_table_ddl = """
CREATE TABLE Results_test_basic_functionality(
a BIGINT,
b BIGINT,
c BIGINT,
d BIGINT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.where(add_one(t.b) <= 3) \
.select(t.a, t.b + 1, add(t.a + 1, subtract_one(t.c)) + 2, add(add_one(t.a), 1)) \
.execute_insert("Results_test_basic_functionality") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 3, 6, 3]", "+I[3, 2, 14, 5]"])
def test_all_data_types(self):
import pandas as pd
import numpy as np
@udf(result_type=DataTypes.TINYINT(), func_type="pandas")
def tinyint_func(tinyint_param):
assert isinstance(tinyint_param, pd.Series)
assert isinstance(tinyint_param[0], np.int8), \
'tinyint_param of wrong type %s !' % type(tinyint_param[0])
return tinyint_param
@udf(result_type=DataTypes.SMALLINT(), func_type="pandas")
def smallint_func(smallint_param):
assert isinstance(smallint_param, pd.Series)
assert isinstance(smallint_param[0], np.int16), \
'smallint_param of wrong type %s !' % type(smallint_param[0])
assert smallint_param[0] == 32767, 'smallint_param of wrong value %s' % smallint_param
return smallint_param
@udf(result_type=DataTypes.INT(), func_type="pandas")
def int_func(int_param):
assert isinstance(int_param, pd.Series)
assert isinstance(int_param[0], np.int32), \
'int_param of wrong type %s !' % type(int_param[0])
assert int_param[0] == -2147483648, 'int_param of wrong value %s' % int_param
return int_param
@udf(result_type=DataTypes.BIGINT(), func_type="pandas")
def bigint_func(bigint_param):
assert isinstance(bigint_param, pd.Series)
assert isinstance(bigint_param[0], np.int64), \
'bigint_param of wrong type %s !' % type(bigint_param[0])
return bigint_param
@udf(result_type=DataTypes.BOOLEAN(), func_type="pandas")
def boolean_func(boolean_param):
assert isinstance(boolean_param, pd.Series)
assert isinstance(boolean_param[0], np.bool_), \
'boolean_param of wrong type %s !' % type(boolean_param[0])
return boolean_param
@udf(result_type=DataTypes.FLOAT(), func_type="pandas")
def float_func(float_param):
assert isinstance(float_param, pd.Series)
assert isinstance(float_param[0], np.float32), \
'float_param of wrong type %s !' % type(float_param[0])
return float_param
@udf(result_type=DataTypes.DOUBLE(), func_type="pandas")
def double_func(double_param):
assert isinstance(double_param, pd.Series)
assert isinstance(double_param[0], np.float64), \
'double_param of wrong type %s !' % type(double_param[0])
return double_param
@udf(result_type=DataTypes.STRING(), func_type="pandas")
def varchar_func(varchar_param):
assert isinstance(varchar_param, pd.Series)
assert isinstance(varchar_param[0], str), \
'varchar_param of wrong type %s !' % type(varchar_param[0])
return varchar_param
@udf(result_type=DataTypes.BYTES(), func_type="pandas")
def varbinary_func(varbinary_param):
assert isinstance(varbinary_param, pd.Series)
assert isinstance(varbinary_param[0], bytes), \
'varbinary_param of wrong type %s !' % type(varbinary_param[0])
return varbinary_param
@udf(result_type=DataTypes.DECIMAL(38, 18), func_type="pandas")
def decimal_func(decimal_param):
assert isinstance(decimal_param, pd.Series)
assert isinstance(decimal_param[0], decimal.Decimal), \
'decimal_param of wrong type %s !' % type(decimal_param[0])
return decimal_param
@udf(result_type=DataTypes.DATE(), func_type="pandas")
def date_func(date_param):
assert isinstance(date_param, pd.Series)
assert isinstance(date_param[0], datetime.date), \
'date_param of wrong type %s !' % type(date_param[0])
return date_param
@udf(result_type=DataTypes.TIME(), func_type="pandas")
def time_func(time_param):
assert isinstance(time_param, pd.Series)
assert isinstance(time_param[0], datetime.time), \
'time_param of wrong type %s !' % type(time_param[0])
return time_param
timestamp_value = datetime.datetime(1970, 1, 2, 0, 0, 0, 123000)
@udf(result_type=DataTypes.TIMESTAMP(3), func_type="pandas")
def timestamp_func(timestamp_param):
assert isinstance(timestamp_param, pd.Series)
assert isinstance(timestamp_param[0], datetime.datetime), \
'timestamp_param of wrong type %s !' % type(timestamp_param[0])
assert timestamp_param[0] == timestamp_value, \
'timestamp_param is wrong value %s, should be %s!' % (timestamp_param[0],
timestamp_value)
return timestamp_param
def array_func(array_param):
assert isinstance(array_param, pd.Series)
assert isinstance(array_param[0], np.ndarray), \
'array_param of wrong type %s !' % type(array_param[0])
return array_param
array_str_func = udf(array_func,
result_type=DataTypes.ARRAY(DataTypes.STRING()),
func_type="pandas")
array_timestamp_func = udf(array_func,
result_type=DataTypes.ARRAY(DataTypes.TIMESTAMP(3)),
func_type="pandas")
array_int_func = udf(array_func,
result_type=DataTypes.ARRAY(DataTypes.INT()),
func_type="pandas")
@udf(result_type=DataTypes.ARRAY(DataTypes.STRING()), func_type="pandas")
def nested_array_func(nested_array_param):
assert isinstance(nested_array_param, pd.Series)
assert isinstance(nested_array_param[0], np.ndarray), \
'nested_array_param of wrong type %s !' % type(nested_array_param[0])
return pd.Series(nested_array_param[0])
row_type = DataTypes.ROW(
[DataTypes.FIELD("f1", DataTypes.INT()),
DataTypes.FIELD("f2", DataTypes.STRING()),
DataTypes.FIELD("f3", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("f4", DataTypes.ARRAY(DataTypes.INT()))])
@udf(result_type=row_type, func_type="pandas")
def row_func(row_param):
assert isinstance(row_param, pd.DataFrame)
assert isinstance(row_param.f1, pd.Series)
assert isinstance(row_param.f1[0], np.int32), \
'row_param.f1 of wrong type %s !' % type(row_param.f1[0])
assert isinstance(row_param.f2, pd.Series)
assert isinstance(row_param.f2[0], str), \
'row_param.f2 of wrong type %s !' % type(row_param.f2[0])
assert isinstance(row_param.f3, pd.Series)
assert isinstance(row_param.f3[0], datetime.datetime), \
'row_param.f3 of wrong type %s !' % type(row_param.f3[0])
assert isinstance(row_param.f4, pd.Series)
assert isinstance(row_param.f4[0], np.ndarray), \
'row_param.f4 of wrong type %s !' % type(row_param.f4[0])
return row_param
map_type = DataTypes.MAP(DataTypes.STRING(False), DataTypes.STRING())
@udf(result_type=map_type, func_type="pandas")
def map_func(map_param):
assert isinstance(map_param, pd.Series)
return map_param
@udf(result_type=DataTypes.BINARY(5), func_type="pandas")
def binary_func(binary_param):
assert isinstance(binary_param, pd.Series)
assert isinstance(binary_param[0], bytes), \
'binary_param of wrong type %s !' % type(binary_param[0])
assert len(binary_param[0]) == 5
return binary_param
sink_table_ddl = """
CREATE TABLE Results_test_all_data_types(
a TINYINT,
b SMALLINT,
c INT,
d BIGINT,
e BOOLEAN,
f BOOLEAN,
g FLOAT,
h DOUBLE,
i STRING,
            j STRING,
k BYTES,
l DECIMAL(38, 18),
m DECIMAL(38, 18),
n DATE,
o TIME,
p TIMESTAMP(3),
q ARRAY<STRING>,
r ARRAY<TIMESTAMP(3)>,
s ARRAY<INT>,
t ARRAY<STRING>,
u ROW<f1 INT, f2 STRING, f3 TIMESTAMP(3), f4 ARRAY<INT>>,
v MAP<STRING, STRING>,
w BINARY(5)
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements(
[(1, 32767, -2147483648, 1, True, False, 1.0, 1.0, 'hello', '中文',
bytearray(b'flink'), decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'),
datetime.date(2014, 9, 13), datetime.time(hour=1, minute=0, second=1),
timestamp_value, ['hello', '中文', None], [timestamp_value], [1, 2],
[['hello', '中文', None]], Row(1, 'hello', timestamp_value, [1, 2]),
{"1": "hello", "2": "world"}, bytearray(b'flink'))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("d", DataTypes.BIGINT()),
DataTypes.FIELD("e", DataTypes.BOOLEAN()),
DataTypes.FIELD("f", DataTypes.BOOLEAN()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.STRING()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.BYTES()),
DataTypes.FIELD("l", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("m", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("n", DataTypes.DATE()),
DataTypes.FIELD("o", DataTypes.TIME()),
DataTypes.FIELD("p", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("q", DataTypes.ARRAY(DataTypes.STRING())),
DataTypes.FIELD("r", DataTypes.ARRAY(DataTypes.TIMESTAMP(3))),
DataTypes.FIELD("s", DataTypes.ARRAY(DataTypes.INT())),
DataTypes.FIELD("t", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.STRING()))),
DataTypes.FIELD("u", row_type),
DataTypes.FIELD("v", map_type),
DataTypes.FIELD("w", DataTypes.BINARY(5))]))
t.select(
tinyint_func(t.a),
smallint_func(t.b),
int_func(t.c),
bigint_func(t.d),
boolean_func(t.e),
boolean_func(t.f),
float_func(t.g),
double_func(t.h),
varchar_func(t.i),
varchar_func(t.j),
varbinary_func(t.k),
decimal_func(t.l),
decimal_func(t.m),
date_func(t.n),
time_func(t.o),
timestamp_func(t.p),
array_str_func(t.q),
array_timestamp_func(t.r),
array_int_func(t.s),
nested_array_func(t.t),
row_func(t.u),
map_func(t.v),
binary_func(t.w)) \
.execute_insert("Results_test_all_data_types").wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[1, 32767, -2147483648, 1, true, false, 1.0, 1.0, hello, 中文, "
"[102, 108, 105, 110, 107], 1000000000000000000.050000000000000000, "
"1000000000000000000.059999999999999999, 2014-09-13, 01:00:01, "
"1970-01-02T00:00:00.123, [hello, 中文, null], [1970-01-02T00:00:00.123], "
"[1, 2], [hello, 中文, null], +I[1, hello, 1970-01-02T00:00:00.123, [1, 2]], "
"{1=hello, 2=world}, [102, 108, 105, 110, 107]]"])
def test_invalid_pandas_udf(self):
@udf(result_type=DataTypes.INT(), func_type="pandas")
def length_mismatch(i):
return i[1:]
@udf(result_type=DataTypes.INT(), func_type="pandas")
def result_type_not_series(i):
return i.iloc[0]
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
msg = "The result length '0' of Pandas UDF 'length_mismatch' is not equal " \
"to the input length '1'"
from py4j.protocol import Py4JJavaError
with self.assertRaisesRegex(Py4JJavaError, expected_regex=msg):
t.select(length_mismatch(t.a)).to_pandas()
msg = "The result type of Pandas UDF 'result_type_not_series' must be pandas.Series or " \
"pandas.DataFrame, got <class 'numpy.int64'>"
from py4j.protocol import Py4JJavaError
with self.assertRaisesRegex(Py4JJavaError, expected_regex=msg):
t.select(result_type_not_series(t.a)).to_pandas()
def test_data_types(self):
import pandas as pd
timezone = self.t_env.get_config().get_local_timezone()
local_datetime = pytz.timezone(timezone).localize(
datetime.datetime(1970, 1, 2, 0, 0, 0, 123000))
@udf(result_type=DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3), func_type="pandas")
def local_zoned_timestamp_func(local_zoned_timestamp_param):
assert isinstance(local_zoned_timestamp_param, pd.Series)
assert isinstance(local_zoned_timestamp_param[0], datetime.datetime), \
'local_zoned_timestamp_param of wrong type %s !' % type(
local_zoned_timestamp_param[0])
assert local_zoned_timestamp_param[0] == local_datetime, \
'local_zoned_timestamp_param is wrong value %s, %s!' % \
(local_zoned_timestamp_param[0], local_datetime)
return local_zoned_timestamp_param
sink_table_ddl = """
CREATE TABLE Results_test_data_types(a TIMESTAMP_LTZ(3)) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements(
[(local_datetime,)],
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))]))
t.select(local_zoned_timestamp_func(local_zoned_timestamp_func(t.a))) \
.execute_insert("Results_test_data_types").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1970-01-02T00:00:00.123Z]"])
class BatchPandasUDFITTests(PandasUDFITTests,
PyFlinkBatchTableTestCase):
pass
class StreamPandasUDFITTests(PandasUDFITTests,
PyFlinkStreamTableTestCase):
pass
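# PandasUDFITTests intentionally extends plain ``object``; the two subclasses
# above mix it with PyFlinkBatchTableTestCase and PyFlinkStreamTableTestCase so
# that the same IT cases run in both batch and streaming mode.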
@udf(result_type=DataTypes.BIGINT(), func_type='pandas')
def add(i, j):
return i + j
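# Because ``add`` is declared with func_type='pandas', its body is applied to
# whole pandas.Series batches using pandas' vectorized '+'; conceptually,
# add(Series([1, 2]), Series([3, 4])) yields Series([4, 6]). It is shared by
# the IT cases above.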
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 18,014 | 42.939024 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_udf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import os
import unittest
import uuid
import pytz
from pyflink.common import Row
from pyflink.table import DataTypes, expressions as expr
from pyflink.table.expressions import call
from pyflink.table.udf import ScalarFunction, udf, FunctionContext
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \
PyFlinkBatchTableTestCase
def generate_random_table_name():
return "Table{0}".format(str(uuid.uuid1()).replace("-", "_"))
class UserDefinedFunctionTests(object):
def test_scalar_function(self):
# test metric disabled.
self.t_env.get_config().set('python.metric.enabled', 'false')
self.t_env.get_config().set('pipeline.global-job-parameters', 'subtract_value:2')
# test lambda function
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
# test Python ScalarFunction
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
subtract_two = udf(SubtractWithParameters(), result_type=DataTypes.BIGINT())
# test callable function
add_one_callable = udf(CallablePlus(), result_type=DataTypes.BIGINT())
def partial_func(col, param):
return col + param
# test partial function
import functools
add_one_partial = udf(functools.partial(partial_func, param=1),
result_type=DataTypes.BIGINT())
# check memory limit is set
@udf(result_type=DataTypes.BIGINT())
def check_memory_limit(exec_mode):
if exec_mode == "process":
assert os.environ['_PYTHON_WORKER_MEMORY_LIMIT'] is not None
return 1
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a BIGINT, b BIGINT, c BIGINT, d BIGINT, e BIGINT, f BIGINT,
g BIGINT, h BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
execution_mode = self.t_env.get_config().get("python.execution-mode", "process")
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.where(add_one(t.b) <= 3).select(
add_one(t.a),
subtract_one(t.b),
subtract_two(t.b),
add(t.a, t.c),
add_one_callable(t.a),
add_one_partial(t.a),
check_memory_limit(execution_mode),
t.a).execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, 1, 0, 4, 2, 2, 1, 1]", "+I[4, 0, -1, 12, 4, 4, 1, 3]"])
def test_chaining_scalar_function(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a BIGINT, b BIGINT, c INT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2, 1), (2, 5, 2), (3, 1, 3)], ['a', 'b', 'c'])
t.select(add(add_one(t.a), subtract_one(t.b)), t.c, expr.lit(1)) \
.execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3, 1, 1]", "+I[7, 2, 1]", "+I[4, 3, 1]"])
def test_udf_in_join_condition(self):
t1 = self.t_env.from_elements([(2, "Hi")], ['a', 'b'])
t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd'])
f = udf(lambda i: i, result_type=DataTypes.BIGINT())
sink_table = generate_random_table_name()
sink_table_ddl = f"""
        CREATE TABLE {sink_table}(a BIGINT, b STRING, c BIGINT, d STRING)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t1.join(t2).where(f(t1.a) == t2.c).execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, Hi, 2, Flink]"])
def test_udf_in_join_condition_2(self):
t1 = self.t_env.from_elements([(1, "Hi"), (2, "Hi")], ['a', 'b'])
t2 = self.t_env.from_elements([(2, "Flink")], ['c', 'd'])
f = udf(lambda i: i, result_type=DataTypes.BIGINT())
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a BIGINT,
b STRING,
c BIGINT,
d STRING
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t1.join(t2).where(f(t1.a) == f(t2.c)).execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, Hi, 2, Flink]"])
def test_udf_with_constant_params(self):
def udf_with_constant_params(p, null_param, tinyint_param, smallint_param, int_param,
bigint_param, decimal_param, float_param, double_param,
boolean_param, str_param,
date_param, time_param, timestamp_param):
from decimal import Decimal
import datetime
assert null_param is None, 'null_param is wrong value %s' % null_param
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
p += tinyint_param
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
p += smallint_param
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
p += int_param
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
p += bigint_param
assert decimal_param == Decimal('1.05'), \
'decimal_param is wrong value %s ' % decimal_param
p += int(decimal_param)
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-06), \
'float_param is wrong value %s ' % float_param
p += int(float_param)
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-07), \
'double_param is wrong value %s ' % double_param
p += int(double_param)
assert boolean_param is True, 'boolean_param is wrong value %s' % boolean_param
assert str_param == 'flink', 'str_param is wrong value %s' % str_param
assert date_param == datetime.date(year=2014, month=9, day=13), \
'date_param is wrong value %s' % date_param
assert time_param == datetime.time(hour=12, minute=0, second=0), \
'time_param is wrong value %s' % time_param
assert timestamp_param == datetime.datetime(1999, 9, 10, 5, 20, 10), \
'timestamp_param is wrong value %s' % timestamp_param
return p
self.t_env.create_temporary_system_function("udf_with_constant_params",
udf(udf_with_constant_params,
result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"udf_with_all_constant_params", udf(lambda i, j: i + j,
result_type=DataTypes.BIGINT()))
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
self.t_env.create_temporary_view("test_table", t)
self.t_env.sql_query("select udf_with_all_constant_params("
"cast (1 as BIGINT),"
"cast (2 as BIGINT)), "
"udf_with_constant_params(a, "
"cast (null as BIGINT),"
"cast (1 as TINYINT),"
"cast (1 as SMALLINT),"
"cast (1 as INT),"
"cast (1 as BIGINT),"
"cast (1.05 as DECIMAL),"
"cast (1.23 as FLOAT),"
"cast (1.98932 as DOUBLE),"
"true,"
"'flink',"
"cast ('2014-09-13' as DATE),"
"cast ('12:00:00' as TIME),"
"cast ('1999-9-10 05:20:10' as TIMESTAMP))"
" from test_table").execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3, 8]", "+I[3, 9]", "+I[3, 10]"])
def test_overwrite_builtin_function(self):
self.t_env.create_temporary_system_function(
"plus", udf(lambda i, j: i + j - 1,
result_type=DataTypes.BIGINT()))
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
t.select(t.a + t.b).execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2]", "+I[6]", "+I[3]"])
def test_open(self):
self.t_env.get_config().set('python.metric.enabled', 'true')
execution_mode = self.t_env.get_config().get("python.execution-mode", None)
if execution_mode == "process":
subtract = udf(SubtractWithMetrics(), result_type=DataTypes.BIGINT())
else:
subtract = udf(Subtract(), result_type=DataTypes.BIGINT())
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 4)], ['a', 'b'])
t.select(t.a, subtract(t.b)).execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 1]", "+I[2, 4]", "+I[3, 3]"])
def test_udf_without_arguments(self):
one = udf(lambda: 1, result_type=DataTypes.BIGINT(), deterministic=True)
two = udf(lambda: 2, result_type=DataTypes.BIGINT(), deterministic=False)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select(one(), two()).execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2]", "+I[1, 2]", "+I[1, 2]"])
def test_all_data_types_expression(self):
@udf(result_type=DataTypes.BOOLEAN())
def boolean_func(bool_param):
assert isinstance(bool_param, bool), 'bool_param of wrong type %s !' \
% type(bool_param)
return bool_param
@udf(result_type=DataTypes.TINYINT())
def tinyint_func(tinyint_param):
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
return tinyint_param
@udf(result_type=DataTypes.SMALLINT())
def smallint_func(smallint_param):
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
assert smallint_param == 32767, 'smallint_param of wrong value %s' % smallint_param
return smallint_param
@udf(result_type=DataTypes.INT())
def int_func(int_param):
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
assert int_param == -2147483648, 'int_param of wrong value %s' % int_param
return int_param
@udf(result_type=DataTypes.BIGINT())
def bigint_func(bigint_param):
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
return bigint_param
@udf(result_type=DataTypes.BIGINT())
def bigint_func_none(bigint_param):
assert bigint_param is None, 'bigint_param %s should be None!' % bigint_param
return bigint_param
@udf(result_type=DataTypes.FLOAT())
def float_func(float_param):
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-6), \
'float_param is wrong value %s !' % float_param
return float_param
@udf(result_type=DataTypes.DOUBLE())
def double_func(double_param):
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-7), \
'double_param is wrong value %s !' % double_param
return double_param
@udf(result_type=DataTypes.BYTES())
def bytes_func(bytes_param):
assert bytes_param == b'flink', \
'bytes_param is wrong value %s !' % bytes_param
return bytes_param
@udf(result_type=DataTypes.STRING())
def str_func(str_param):
assert str_param == 'pyflink', \
'str_param is wrong value %s !' % str_param
return str_param
@udf(result_type=DataTypes.DATE())
def date_func(date_param):
from datetime import date
assert date_param == date(year=2014, month=9, day=13), \
'date_param is wrong value %s !' % date_param
return date_param
@udf(result_type=DataTypes.TIME())
def time_func(time_param):
from datetime import time
assert time_param == time(hour=12, minute=0, second=0, microsecond=123000), \
'time_param is wrong value %s !' % time_param
return time_param
@udf(result_type=DataTypes.TIMESTAMP(3))
def timestamp_func(timestamp_param):
from datetime import datetime
assert timestamp_param == datetime(2018, 3, 11, 3, 0, 0, 123000), \
'timestamp_param is wrong value %s !' % timestamp_param
return timestamp_param
@udf(result_type=DataTypes.ARRAY(DataTypes.BIGINT()))
def array_func(array_param):
assert array_param == [[1, 2, 3]] or array_param == ((1, 2, 3),), \
'array_param is wrong value %s !' % array_param
return array_param[0]
@udf(result_type=DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING()))
def map_func(map_param):
assert map_param == {1: 'flink', 2: 'pyflink'}, \
'map_param is wrong value %s !' % map_param
return map_param
@udf(result_type=DataTypes.DECIMAL(38, 18))
def decimal_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.050000000000000000'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
@udf(result_type=DataTypes.DECIMAL(38, 18))
def decimal_cut_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.059999999999999999'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
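        # The literal fed in below has more than 18 fractional digits; it is rounded to
        # DECIMAL(38, 18) before reaching the UDF, hence the expected ...059999999999999999.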
@udf(result_type=DataTypes.BINARY(5))
def binary_func(binary_param):
assert len(binary_param) == 5
return binary_param
@udf(result_type=DataTypes.CHAR(7))
def char_func(char_param):
assert len(char_param) == 7
return char_param
@udf(result_type=DataTypes.VARCHAR(10))
def varchar_func(varchar_param):
assert len(varchar_param) <= 10
return varchar_param
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a BIGINT,
b BIGINT,
c TINYINT,
d BOOLEAN,
e SMALLINT,
f INT,
g FLOAT,
h DOUBLE,
i BYTES,
j STRING,
k DATE,
l TIME,
m TIMESTAMP(3),
n ARRAY<BIGINT>,
o MAP<BIGINT, STRING>,
p DECIMAL(38, 18),
q DECIMAL(38, 18),
r BINARY(5),
s CHAR(7),
t VARCHAR(10)
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
import datetime
import decimal
t = self.t_env.from_elements(
[(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0, microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [[1, 2, 3]],
{1: 'flink', 2: 'pyflink'}, decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'),
bytearray(b'flink'), 'pyflink', 'pyflink')],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.TINYINT()),
DataTypes.FIELD("d", DataTypes.BOOLEAN()),
DataTypes.FIELD("e", DataTypes.SMALLINT()),
DataTypes.FIELD("f", DataTypes.INT()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.BYTES()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.DATE()),
DataTypes.FIELD("l", DataTypes.TIME()),
DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))),
DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())),
DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("q", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("r", DataTypes.BINARY(5)),
DataTypes.FIELD("s", DataTypes.CHAR(7)),
DataTypes.FIELD("t", DataTypes.VARCHAR(10))]))
t.select(
bigint_func(t.a),
bigint_func_none(t.b),
tinyint_func(t.c),
boolean_func(t.d),
smallint_func(t.e),
int_func(t.f),
float_func(t.g),
double_func(t.h),
bytes_func(t.i),
str_func(t.j),
date_func(t.k),
time_func(t.l),
timestamp_func(t.m),
array_func(t.n),
map_func(t.o),
decimal_func(t.p),
decimal_cut_func(t.q),
binary_func(t.r),
char_func(t.s),
varchar_func(t.t)) \
.execute_insert(sink_table).wait()
actual = source_sink_utils.results()
        # Currently, the sink only supports precision 0 for DataTypes.TIME(precision) results.
self.assert_equals(actual,
["+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, "
"[102, 108, 105, 110, 107], pyflink, 2014-09-13, 12:00:00.123, "
"2018-03-11T03:00:00.123, [1, 2, 3], "
"{1=flink, 2=pyflink}, 1000000000000000000.050000000000000000, "
"1000000000000000000.059999999999999999, [102, 108, 105, 110, 107], "
"pyflink, pyflink]"])
def test_all_data_types(self):
def boolean_func(bool_param):
assert isinstance(bool_param, bool), 'bool_param of wrong type %s !' \
% type(bool_param)
return bool_param
def tinyint_func(tinyint_param):
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
return tinyint_param
def smallint_func(smallint_param):
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
assert smallint_param == 32767, 'smallint_param of wrong value %s' % smallint_param
return smallint_param
def int_func(int_param):
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
assert int_param == -2147483648, 'int_param of wrong value %s' % int_param
return int_param
def bigint_func(bigint_param):
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
return bigint_param
def bigint_func_none(bigint_param):
assert bigint_param is None, 'bigint_param %s should be None!' % bigint_param
return bigint_param
def float_func(float_param):
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-6), \
'float_param is wrong value %s !' % float_param
return float_param
def double_func(double_param):
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-7), \
'double_param is wrong value %s !' % double_param
return double_param
def bytes_func(bytes_param):
assert bytes_param == b'flink', \
'bytes_param is wrong value %s !' % bytes_param
return bytes_param
def str_func(str_param):
assert str_param == 'pyflink', \
'str_param is wrong value %s !' % str_param
return str_param
def date_func(date_param):
from datetime import date
assert date_param == date(year=2014, month=9, day=13), \
'date_param is wrong value %s !' % date_param
return date_param
def time_func(time_param):
from datetime import time
assert time_param == time(hour=12, minute=0, second=0, microsecond=123000), \
'time_param is wrong value %s !' % time_param
return time_param
def timestamp_func(timestamp_param):
from datetime import datetime
assert timestamp_param == datetime(2018, 3, 11, 3, 0, 0, 123000), \
'timestamp_param is wrong value %s !' % timestamp_param
return timestamp_param
def array_func(array_param):
assert array_param == [[1, 2, 3]] or array_param == ((1, 2, 3),), \
'array_param is wrong value %s !' % array_param
return array_param[0]
def map_func(map_param):
assert map_param == {1: 'flink', 2: 'pyflink'}, \
'map_param is wrong value %s !' % map_param
return map_param
def decimal_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.050000000000000000'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
def decimal_cut_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.059999999999999999'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
self.t_env.create_temporary_system_function(
"boolean_func", udf(boolean_func, result_type=DataTypes.BOOLEAN()))
self.t_env.create_temporary_system_function(
"tinyint_func", udf(tinyint_func, result_type=DataTypes.TINYINT()))
self.t_env.create_temporary_system_function(
"smallint_func", udf(smallint_func, result_type=DataTypes.SMALLINT()))
self.t_env.create_temporary_system_function(
"int_func", udf(int_func, result_type=DataTypes.INT()))
self.t_env.create_temporary_system_function(
"bigint_func", udf(bigint_func, result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"bigint_func_none", udf(bigint_func_none, result_type=DataTypes.BIGINT()))
self.t_env.create_temporary_system_function(
"float_func", udf(float_func, result_type=DataTypes.FLOAT()))
self.t_env.create_temporary_system_function(
"double_func", udf(double_func, result_type=DataTypes.DOUBLE()))
self.t_env.create_temporary_system_function(
"bytes_func", udf(bytes_func, result_type=DataTypes.BYTES()))
self.t_env.create_temporary_system_function(
"str_func", udf(str_func, result_type=DataTypes.STRING()))
self.t_env.create_temporary_system_function(
"date_func", udf(date_func, result_type=DataTypes.DATE()))
self.t_env.create_temporary_system_function(
"time_func", udf(time_func, result_type=DataTypes.TIME()))
self.t_env.create_temporary_system_function(
"timestamp_func", udf(timestamp_func, result_type=DataTypes.TIMESTAMP(3)))
self.t_env.create_temporary_system_function(
"array_func", udf(array_func, result_type=DataTypes.ARRAY(DataTypes.BIGINT())))
self.t_env.create_temporary_system_function(
"map_func", udf(map_func,
result_type=DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())))
self.t_env.create_temporary_system_function(
"decimal_func", udf(decimal_func, result_type=DataTypes.DECIMAL(38, 18)))
self.t_env.create_temporary_system_function(
"decimal_cut_func", udf(decimal_cut_func, result_type=DataTypes.DECIMAL(38, 18)))
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a BIGINT, b BIGINT, c TINYINT, d BOOLEAN, e SMALLINT, f INT, g FLOAT, h DOUBLE,
i BYTES, j STRING, k DATE, l TIME, m TIMESTAMP(3), n ARRAY<BIGINT>,
o MAP<BIGINT, STRING>, p DECIMAL(38, 18), q DECIMAL(38, 18))
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
import datetime
import decimal
t = self.t_env.from_elements(
[(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0, microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [[1, 2, 3]],
{1: 'flink', 2: 'pyflink'}, decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.TINYINT()),
DataTypes.FIELD("d", DataTypes.BOOLEAN()),
DataTypes.FIELD("e", DataTypes.SMALLINT()),
DataTypes.FIELD("f", DataTypes.INT()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.BYTES()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.DATE()),
DataTypes.FIELD("l", DataTypes.TIME()),
DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))),
DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())),
DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("q", DataTypes.DECIMAL(38, 18))]))
t.select(call("bigint_func", t.a), call("bigint_func_none", t.b),
call("tinyint_func", t.c), call("boolean_func", t.d),
call("smallint_func", t.e), call("int_func", t.f),
call("float_func", t.g), call("double_func", t.h),
call("bytes_func", t.i), call("str_func", t.j),
call("date_func", t.k), call("time_func", t.l),
call("timestamp_func", t.m), call("array_func", t.n),
call("map_func", t.o), call("decimal_func", t.p),
call("decimal_cut_func", t.q)) \
.execute_insert(sink_table).wait()
actual = source_sink_utils.results()
        # Currently, the sink only supports precision 0 for DataTypes.TIME(precision) results.
self.assert_equals(actual,
["+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, "
"[102, 108, 105, 110, 107], pyflink, 2014-09-13, "
"12:00:00.123, 2018-03-11T03:00:00.123, [1, 2, 3], "
"{1=flink, 2=pyflink}, 1000000000000000000.050000000000000000, "
"1000000000000000000.059999999999999999]"])
def test_create_and_drop_function(self):
t_env = self.t_env
t_env.create_temporary_system_function(
"add_one_func", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
t_env.create_temporary_function(
"subtract_one_func", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
self.assertTrue('add_one_func' in t_env.list_user_defined_functions())
self.assertTrue('subtract_one_func' in t_env.list_user_defined_functions())
t_env.drop_temporary_system_function("add_one_func")
t_env.drop_temporary_function("subtract_one_func")
self.assertTrue('add_one_func' not in t_env.list_user_defined_functions())
self.assertTrue('subtract_one_func' not in t_env.list_user_defined_functions())
# Decide whether two floats are approximately equal within the given relative and absolute tolerances.
def float_equal(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
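# A quick sanity check of the tolerance logic: float_equal(1.23, 1.2300001, rel_tol=1e-6) is True,
# while the default rel_tol of 1e-09 would treat the same pair as different.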
class PyFlinkStreamUserDefinedFunctionTests(UserDefinedFunctionTests,
PyFlinkStreamTableTestCase):
def test_deterministic(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.assertTrue(add_one._deterministic)
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), deterministic=False)
self.assertFalse(add_one._deterministic)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
self.assertTrue(subtract_one._deterministic)
with self.assertRaises(ValueError, msg="Inconsistent deterministic: False and True"):
udf(SubtractOne(), result_type=DataTypes.BIGINT(), deterministic=False)
self.assertTrue(add._deterministic)
@udf(result_type=DataTypes.BIGINT(), deterministic=False)
def non_deterministic_udf(i):
return i
self.assertFalse(non_deterministic_udf._deterministic)
def test_name(self):
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.assertEqual("<lambda>", add_one._name)
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT(), name="add_one")
self.assertEqual("add_one", add_one._name)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT())
self.assertEqual("SubtractOne", subtract_one._name)
subtract_one = udf(SubtractOne(), result_type=DataTypes.BIGINT(), name="subtract_one")
self.assertEqual("subtract_one", subtract_one._name)
self.assertEqual("add", add._name)
@udf(result_type=DataTypes.BIGINT(), name="named")
def named_udf(i):
return i
self.assertEqual("named", named_udf._name)
def test_abc(self):
class UdfWithoutEval(ScalarFunction):
def open(self, function_context):
pass
with self.assertRaises(
TypeError,
msg="Can't instantiate abstract class UdfWithoutEval with abstract methods eval"):
UdfWithoutEval()
def test_invalid_udf(self):
class Plus(object):
def eval(self, col):
return col + 1
with self.assertRaises(
TypeError,
msg="Invalid function: not a function or callable (__call__ is not defined)"):
# test non-callable function
self.t_env.create_temporary_system_function(
"non-callable-udf", udf(Plus(), DataTypes.BIGINT(), DataTypes.BIGINT()))
def test_data_types(self):
timezone = self.t_env.get_config().get_local_timezone()
local_datetime = pytz.timezone(timezone).localize(
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000))
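        # TIMESTAMP_LTZ values are exchanged as instants relative to the session time zone,
        # so the expected datetime is localized with the configured local time zone first.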
@udf(result_type=DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))
def local_zoned_timestamp_func(local_zoned_timestamp_param):
assert local_zoned_timestamp_param == local_datetime, \
'local_zoned_timestamp_param is wrong value %s !' % local_zoned_timestamp_param
return local_zoned_timestamp_param
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TIMESTAMP_LTZ(3)) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements(
[(local_datetime,)],
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))]))
t.select(local_zoned_timestamp_func(local_zoned_timestamp_func(t.a))) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1970-01-01T00:00:00.123Z]"])
def test_execute_from_json_plan(self):
# create source file path
tmp_dir = self.tempdir
data = ['1,1', '3,3', '2,2']
source_path = tmp_dir + '/test_execute_from_json_plan_input.csv'
sink_path = tmp_dir + '/test_execute_from_json_plan_out'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table = """
CREATE TABLE source_table (
a BIGINT,
b BIGINT
) WITH (
'connector' = 'filesystem',
'path' = '%s',
'format' = 'csv'
)
""" % source_path
self.t_env.execute_sql(source_table)
self.t_env.execute_sql("""
CREATE TABLE sink_table (
id BIGINT,
data BIGINT
) WITH (
'connector' = 'filesystem',
'path' = '%s',
'format' = 'csv'
)
""" % sink_path)
add_one = udf(lambda i: i + 1, result_type=DataTypes.BIGINT())
self.t_env.create_temporary_system_function("add_one", add_one)
json_plan = self.t_env._j_tenv.compilePlanSql("INSERT INTO sink_table SELECT "
"a, "
"add_one(b) "
"FROM source_table")
from py4j.java_gateway import get_method
get_method(json_plan.execute(), "await")()
import glob
lines = [line.strip() for file in glob.glob(sink_path + '/*') for line in open(file, 'r')]
lines.sort()
self.assertEqual(lines, ['1,2', '2,3', '3,4'])
def test_udf_with_rowtime_arguments(self):
from pyflink.common import WatermarkStrategy
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.table import Schema
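        # Assign event timestamps from the first field of each record so that the "rowtime"
        # metadata column and SOURCE_WATERMARK() below are populated.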
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[0])
ds = self.env.from_collection(
[(1, 42, "a"), (2, 5, "a"), (3, 1000, "c"), (100, 1000, "c")],
Types.ROW_NAMED(["a", "b", "c"], [Types.LONG(), Types.INT(), Types.STRING()]))
ds = ds.assign_timestamps_and_watermarks(
WatermarkStrategy.for_monotonous_timestamps()
.with_timestamp_assigner(MyTimestampAssigner()))
table = self.t_env.from_data_stream(
ds,
Schema.new_builder()
.column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)")
.watermark("rowtime", "SOURCE_WATERMARK()")
.build())
@udf(result_type=DataTypes.ROW([DataTypes.FIELD('f1', DataTypes.INT())]))
def inc(input_row):
return Row(input_row.b)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a INT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
table.map(inc).execute_insert(sink_table).wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ['+I[42]', '+I[5]', '+I[1000]', '+I[1000]'])
class PyFlinkBatchUserDefinedFunctionTests(UserDefinedFunctionTests,
PyFlinkBatchTableTestCase):
pass
class PyFlinkEmbeddedThreadTests(UserDefinedFunctionTests, PyFlinkBatchTableTestCase):
def setUp(self):
super(PyFlinkEmbeddedThreadTests, self).setUp()
self.t_env.get_config().set("python.execution-mode", "thread")
def test_all_data_types_string(self):
@udf(result_type='BOOLEAN')
def boolean_func(bool_param):
assert isinstance(bool_param, bool), 'bool_param of wrong type %s !' \
% type(bool_param)
return bool_param
@udf(result_type='TINYINT')
def tinyint_func(tinyint_param):
assert isinstance(tinyint_param, int), 'tinyint_param of wrong type %s !' \
% type(tinyint_param)
return tinyint_param
@udf(result_type='SMALLINT')
def smallint_func(smallint_param):
assert isinstance(smallint_param, int), 'smallint_param of wrong type %s !' \
% type(smallint_param)
assert smallint_param == 32767, 'smallint_param of wrong value %s' % smallint_param
return smallint_param
@udf(result_type='INT')
def int_func(int_param):
assert isinstance(int_param, int), 'int_param of wrong type %s !' \
% type(int_param)
assert int_param == -2147483648, 'int_param of wrong value %s' % int_param
return int_param
@udf(result_type='BIGINT')
def bigint_func(bigint_param):
assert isinstance(bigint_param, int), 'bigint_param of wrong type %s !' \
% type(bigint_param)
return bigint_param
@udf(result_type='BIGINT')
def bigint_func_none(bigint_param):
assert bigint_param is None, 'bigint_param %s should be None!' % bigint_param
return bigint_param
@udf(result_type='FLOAT')
def float_func(float_param):
assert isinstance(float_param, float) and float_equal(float_param, 1.23, 1e-6), \
'float_param is wrong value %s !' % float_param
return float_param
@udf(result_type='DOUBLE')
def double_func(double_param):
assert isinstance(double_param, float) and float_equal(double_param, 1.98932, 1e-7), \
'double_param is wrong value %s !' % double_param
return double_param
@udf(result_type='BYTES')
def bytes_func(bytes_param):
assert bytes_param == b'flink', \
'bytes_param is wrong value %s !' % bytes_param
return bytes_param
@udf(result_type='STRING')
def str_func(str_param):
assert str_param == 'pyflink', \
'str_param is wrong value %s !' % str_param
return str_param
@udf(result_type='DATE')
def date_func(date_param):
from datetime import date
assert date_param == date(year=2014, month=9, day=13), \
'date_param is wrong value %s !' % date_param
return date_param
@udf(result_type='TIME')
def time_func(time_param):
from datetime import time
assert time_param == time(hour=12, minute=0, second=0, microsecond=123000), \
'time_param is wrong value %s !' % time_param
return time_param
@udf(result_type='TIMESTAMP(3)')
def timestamp_func(timestamp_param):
from datetime import datetime
assert timestamp_param == datetime(2018, 3, 11, 3, 0, 0, 123000), \
'timestamp_param is wrong value %s !' % timestamp_param
return timestamp_param
@udf(result_type='ARRAY<BIGINT>')
def array_func(array_param):
assert array_param == [[1, 2, 3]] or array_param == ((1, 2, 3),), \
'array_param is wrong value %s !' % array_param
return array_param[0]
@udf(result_type='MAP<BIGINT, STRING>')
def map_func(map_param):
assert map_param == {1: 'flink', 2: 'pyflink'}, \
'map_param is wrong value %s !' % map_param
return map_param
@udf(result_type='DECIMAL(38, 18)')
def decimal_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.050000000000000000'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
@udf(result_type='DECIMAL(38, 18)')
def decimal_cut_func(decimal_param):
from decimal import Decimal
assert decimal_param == Decimal('1000000000000000000.059999999999999999'), \
'decimal_param is wrong value %s !' % decimal_param
return decimal_param
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a BIGINT, b BIGINT, c TINYINT, d BOOLEAN, e SMALLINT, f INT, g FLOAT, h DOUBLE, i BYTES,
j STRING, k DATE, l TIME, m TIMESTAMP(3), n ARRAY<BIGINT>, o MAP<BIGINT, STRING>,
p DECIMAL(38, 18), q DECIMAL(38, 18)) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
import datetime
import decimal
t = self.t_env.from_elements(
[(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0, microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [[1, 2, 3]],
{1: 'flink', 2: 'pyflink'}, decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.TINYINT()),
DataTypes.FIELD("d", DataTypes.BOOLEAN()),
DataTypes.FIELD("e", DataTypes.SMALLINT()),
DataTypes.FIELD("f", DataTypes.INT()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.BYTES()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.DATE()),
DataTypes.FIELD("l", DataTypes.TIME()),
DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.BIGINT()))),
DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.STRING())),
DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("q", DataTypes.DECIMAL(38, 18))]))
t.select(
bigint_func(t.a),
bigint_func_none(t.b),
tinyint_func(t.c),
boolean_func(t.d),
smallint_func(t.e),
int_func(t.f),
float_func(t.g),
double_func(t.h),
bytes_func(t.i),
str_func(t.j),
date_func(t.k),
time_func(t.l),
timestamp_func(t.m),
array_func(t.n),
map_func(t.o),
decimal_func(t.p),
decimal_cut_func(t.q)) \
.execute_insert(sink_table).wait()
actual = source_sink_utils.results()
        # Currently, the sink only supports precision 0 for DataTypes.TIME(precision) results.
self.assert_equals(actual,
["+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, "
"[102, 108, 105, 110, 107], pyflink, 2014-09-13, 12:00:00.123, "
"2018-03-11T03:00:00.123, [1, 2, 3], "
"{1=flink, 2=pyflink}, 1000000000000000000.050000000000000000, "
"1000000000000000000.059999999999999999]"])
# Test specifying the input_types explicitly.
@udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()], result_type=DataTypes.BIGINT())
def add(i, j):
return i + j
class SubtractOne(ScalarFunction):
def eval(self, i):
return i - 1
class SubtractWithParameters(ScalarFunction):
def open(self, function_context: FunctionContext):
self.subtract_value = int(function_context.get_job_parameter("subtract_value", "1"))
def eval(self, i):
return i - self.subtract_value
class SubtractWithMetrics(ScalarFunction, unittest.TestCase):
def open(self, function_context):
self.subtracted_value = 1
mg = function_context.get_metric_group()
self.counter = mg.add_group("key", "value").counter("my_counter")
self.counter_sum = 0
def eval(self, i):
# counter
self.counter.inc(i)
self.counter_sum += i
return i - self.subtracted_value
class Subtract(ScalarFunction, unittest.TestCase):
def open(self, function_context):
self.subtracted_value = 1
self.counter_sum = 0
def eval(self, i):
# counter
self.counter_sum += i
return i - self.subtracted_value
class CallablePlus(object):
def __call__(self, col):
return col + 1
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 49,795 | 42.15078 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_descriptor.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
import sys
from pyflink.table.descriptors import (Rowtime, Schema)
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import DataTypes
from pyflink.testing.test_case_utils import PyFlinkTestCase
class RowTimeDescriptorTests(PyFlinkTestCase):
def test_timestamps_from_field(self):
rowtime = Rowtime().timestamps_from_field("rtime")
properties = rowtime.to_properties()
expected = {'rowtime.timestamps.type': 'from-field', 'rowtime.timestamps.from': 'rtime'}
self.assertEqual(expected, properties)
def test_timestamps_from_source(self):
rowtime = Rowtime().timestamps_from_source()
properties = rowtime.to_properties()
expected = {'rowtime.timestamps.type': 'from-source'}
self.assertEqual(expected, properties)
def test_timestamps_from_extractor(self):
rowtime = Rowtime().timestamps_from_extractor(
"org.apache.flink.table.utils.TestingDescriptors$CustomExtractor")
properties = rowtime.to_properties()
expected = {
'rowtime.timestamps.type': 'custom',
'rowtime.timestamps.class':
'org.apache.flink.table.utils.TestingDescriptors$CustomExtractor',
'rowtime.timestamps.serialized':
'rO0ABXNyAD9vcmcuYXBhY2hlLmZsaW5rLnRhYmxlLnV0aWxzLlRlc3RpbmdEZXNjcmlwdG9ycyRDdXN0b2'
'1FeHRyYWN0b3K-MntVKO8Z7QIAAUwABWZpZWxkdAASTGphdmEvbGFuZy9TdHJpbmc7eHIAPm9yZy5hcGFj'
'aGUuZmxpbmsudGFibGUuc291cmNlcy50c2V4dHJhY3RvcnMuVGltZXN0YW1wRXh0cmFjdG9yX9WOqYhTbB'
'gCAAB4cHQAAnRz'}
self.assertEqual(expected, properties)
def test_watermarks_periodic_ascending(self):
rowtime = Rowtime().watermarks_periodic_ascending()
properties = rowtime.to_properties()
expected = {'rowtime.watermarks.type': 'periodic-ascending'}
self.assertEqual(expected, properties)
def test_watermarks_periodic_bounded(self):
rowtime = Rowtime().watermarks_periodic_bounded(1000)
properties = rowtime.to_properties()
expected = {'rowtime.watermarks.type': 'periodic-bounded',
'rowtime.watermarks.delay': '1000'}
self.assertEqual(expected, properties)
def test_watermarks_from_source(self):
rowtime = Rowtime().watermarks_from_source()
properties = rowtime.to_properties()
expected = {'rowtime.watermarks.type': 'from-source'}
self.assertEqual(expected, properties)
def test_watermarks_from_strategy(self):
rowtime = Rowtime().watermarks_from_strategy(
"org.apache.flink.table.utils.TestingDescriptors$CustomAssigner")
properties = rowtime.to_properties()
expected = {
'rowtime.watermarks.type': 'custom',
'rowtime.watermarks.class':
'org.apache.flink.table.utils.TestingDescriptors$CustomAssigner',
'rowtime.watermarks.serialized':
'rO0ABXNyAD5vcmcuYXBhY2hlLmZsaW5rLnRhYmxlLnV0aWxzLlRlc3RpbmdEZXNjcmlwdG9ycyRDdXN0b2'
'1Bc3NpZ25lcsY_Xt96bBjDAgAAeHIAR29yZy5hcGFjaGUuZmxpbmsudGFibGUuc291cmNlcy53bXN0cmF0'
'ZWdpZXMuUHVuY3R1YXRlZFdhdGVybWFya0Fzc2lnbmVygVHOe6GlrvQCAAB4cgA9b3JnLmFwYWNoZS5mbG'
'luay50YWJsZS5zb3VyY2VzLndtc3RyYXRlZ2llcy5XYXRlcm1hcmtTdHJhdGVned57foNjlmk-AgAAeHA'}
self.assertEqual(expected, properties)
class SchemaDescriptorTests(PyFlinkTestCase):
def test_field(self):
schema = Schema()\
.field("int_field", DataTypes.INT())\
.field("long_field", DataTypes.BIGINT())\
.field("string_field", DataTypes.STRING())\
.field("timestamp_field", DataTypes.TIMESTAMP(3))\
.field("time_field", DataTypes.TIME())\
.field("date_field", DataTypes.DATE())\
.field("double_field", DataTypes.DOUBLE())\
.field("float_field", DataTypes.FLOAT())\
.field("byte_field", DataTypes.TINYINT())\
.field("short_field", DataTypes.SMALLINT())\
.field("boolean_field", DataTypes.BOOLEAN())
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR(2147483647)',
'schema.3.name': 'timestamp_field',
'schema.3.data-type': 'TIMESTAMP(3)',
'schema.4.name': 'time_field',
'schema.4.data-type': 'TIME(0)',
'schema.5.name': 'date_field',
'schema.5.data-type': 'DATE',
'schema.6.name': 'double_field',
'schema.6.data-type': 'DOUBLE',
'schema.7.name': 'float_field',
'schema.7.data-type': 'FLOAT',
'schema.8.name': 'byte_field',
'schema.8.data-type': 'TINYINT',
'schema.9.name': 'short_field',
'schema.9.data-type': 'SMALLINT',
'schema.10.name': 'boolean_field',
'schema.10.data-type': 'BOOLEAN'}
self.assertEqual(expected, properties)
def test_fields(self):
fields = collections.OrderedDict([
("int_field", DataTypes.INT()),
("long_field", DataTypes.BIGINT()),
("string_field", DataTypes.STRING()),
("timestamp_field", DataTypes.TIMESTAMP(3)),
("time_field", DataTypes.TIME()),
("date_field", DataTypes.DATE()),
("double_field", DataTypes.DOUBLE()),
("float_field", DataTypes.FLOAT()),
("byte_field", DataTypes.TINYINT()),
("short_field", DataTypes.SMALLINT()),
("boolean_field", DataTypes.BOOLEAN())
])
schema = Schema().fields(fields)
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR(2147483647)',
'schema.3.name': 'timestamp_field',
'schema.3.data-type': 'TIMESTAMP(3)',
'schema.4.name': 'time_field',
'schema.4.data-type': 'TIME(0)',
'schema.5.name': 'date_field',
'schema.5.data-type': 'DATE',
'schema.6.name': 'double_field',
'schema.6.data-type': 'DOUBLE',
'schema.7.name': 'float_field',
'schema.7.data-type': 'FLOAT',
'schema.8.name': 'byte_field',
'schema.8.data-type': 'TINYINT',
'schema.9.name': 'short_field',
'schema.9.data-type': 'SMALLINT',
'schema.10.name': 'boolean_field',
'schema.10.data-type': 'BOOLEAN'}
self.assertEqual(expected, properties)
if sys.version_info[:2] <= (3, 5):
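            # Plain dicts did not guarantee insertion order before Python 3.6, so fields()
            # only accepts an ordered mapping and rejects a plain dict on those versions.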
fields = {
"int_field": DataTypes.INT(),
"long_field": DataTypes.BIGINT(),
"string_field": DataTypes.STRING(),
"timestamp_field": DataTypes.TIMESTAMP(3),
"time_field": DataTypes.TIME(),
"date_field": DataTypes.DATE(),
"double_field": DataTypes.DOUBLE(),
"float_field": DataTypes.FLOAT(),
"byte_field": DataTypes.TINYINT(),
"short_field": DataTypes.SMALLINT(),
"boolean_field": DataTypes.BOOLEAN()
}
self.assertRaises(TypeError, Schema().fields, fields)
def test_field_in_string(self):
schema = Schema()\
.field("int_field", 'INT')\
.field("long_field", 'BIGINT')\
.field("string_field", 'VARCHAR')\
.field("timestamp_field", 'SQL_TIMESTAMP')\
.field("time_field", 'SQL_TIME')\
.field("date_field", 'SQL_DATE')\
.field("double_field", 'DOUBLE')\
.field("float_field", 'FLOAT')\
.field("byte_field", 'TINYINT')\
.field("short_field", 'SMALLINT')\
.field("boolean_field", 'BOOLEAN')
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR',
'schema.3.name': 'timestamp_field',
'schema.3.data-type': 'TIMESTAMP(3)',
'schema.4.name': 'time_field',
'schema.4.data-type': 'TIME(0)',
'schema.5.name': 'date_field',
'schema.5.data-type': 'DATE',
'schema.6.name': 'double_field',
'schema.6.data-type': 'DOUBLE',
'schema.7.name': 'float_field',
'schema.7.data-type': 'FLOAT',
'schema.8.name': 'byte_field',
'schema.8.data-type': 'TINYINT',
'schema.9.name': 'short_field',
'schema.9.data-type': 'SMALLINT',
'schema.10.name': 'boolean_field',
'schema.10.data-type': 'BOOLEAN'}
self.assertEqual(expected, properties)
def test_from_origin_field(self):
schema = Schema()\
.field("int_field", DataTypes.INT())\
.field("long_field", DataTypes.BIGINT()).from_origin_field("origin_field_a")\
.field("string_field", DataTypes.STRING())
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.1.from': 'origin_field_a',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR(2147483647)'}
self.assertEqual(expected, properties)
def test_proctime(self):
schema = Schema()\
.field("int_field", DataTypes.INT())\
.field("ptime", DataTypes.BIGINT()).proctime()\
.field("string_field", DataTypes.STRING())
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'ptime',
'schema.1.data-type': 'BIGINT',
'schema.1.proctime': 'true',
'schema.2.name': 'string_field',
'schema.2.data-type': 'VARCHAR(2147483647)'}
self.assertEqual(expected, properties)
def test_rowtime(self):
schema = Schema()\
.field("int_field", DataTypes.INT())\
.field("long_field", DataTypes.BIGINT())\
.field("rtime", DataTypes.BIGINT())\
.rowtime(
Rowtime().timestamps_from_field("long_field").watermarks_periodic_bounded(5000))\
.field("string_field", DataTypes.STRING())
properties = schema.to_properties()
expected = {'schema.0.name': 'int_field',
'schema.0.data-type': 'INT',
'schema.1.name': 'long_field',
'schema.1.data-type': 'BIGINT',
'schema.2.name': 'rtime',
'schema.2.data-type': 'BIGINT',
'schema.2.rowtime.timestamps.type': 'from-field',
'schema.2.rowtime.timestamps.from': 'long_field',
'schema.2.rowtime.watermarks.type': 'periodic-bounded',
'schema.2.rowtime.watermarks.delay': '5000',
'schema.3.name': 'string_field',
'schema.3.data-type': 'VARCHAR(2147483647)'}
self.assertEqual(expected, properties)
def test_schema(self):
table_schema = TableSchema(["a", "b"], [DataTypes.INT(), DataTypes.STRING()])
schema = Schema().schema(table_schema)
properties = schema.to_properties()
expected = {'schema.0.name': 'a',
'schema.0.data-type': 'INT',
'schema.1.name': 'b',
'schema.1.data-type': 'VARCHAR(2147483647)'}
self.assertEqual(expected, properties)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 14,141 | 43.895238 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_pandas_udaf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import uuid
from pyflink.table.expressions import col, call, lit, row_interval
from pyflink.table.types import DataTypes
from pyflink.table.udf import udaf, udf, AggregateFunction
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase, \
PyFlinkStreamTableTestCase
def generate_random_table_name():
return "Table{0}".format(str(uuid.uuid1()).replace("-", "_"))
class BatchPandasUDAFITTests(PyFlinkBatchTableTestCase):
@classmethod
def setUpClass(cls):
super(BatchPandasUDAFITTests, cls).setUpClass()
cls.t_env.create_temporary_system_function("max_add", udaf(MaxAdd(),
result_type=DataTypes.INT(),
func_type="pandas"))
cls.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
def test_check_result_type(self):
def pandas_udaf():
pass
with self.assertRaises(
TypeError,
msg="Invalid returnType: Pandas UDAF doesn't support DataType type MAP currently"):
udaf(pandas_udaf, result_type=DataTypes.MAP(DataTypes.INT(), DataTypes.INT()),
func_type="pandas")
def test_group_aggregate_function(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a TINYINT,
b FLOAT,
c ROW<a INT, b INT>,
d STRING
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
# general udf
add = udf(lambda a: a + 1, result_type=DataTypes.INT())
# pandas udf
        subtract = udf(lambda a: a - 1, result_type=DataTypes.INT(), func_type="pandas")
max_udaf = udaf(lambda a: (a.max(), a.min()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
@udaf(result_type=DataTypes.STRING(), func_type="pandas")
def multiply_udaf(a, b):
return len(a) * b[0]
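        # Pandas UDAF inputs arrive as pandas.Series per group: len(a) is the group size and
        # b[0] is the literal 'abc', so the result repeats 'abc' once per row in the group.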
t.group_by(t.a) \
            .select(t.a, mean_udaf(add(t.b)), max_udaf(subtract(t.c)), multiply_udaf(t.b, 'abc')) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[1, 6.0, +I[5, 2], abcabcabc]",
"+I[2, 3.0, +I[3, 2], abcabc]",
"+I[3, 3.0, +I[2, 2], abc]"])
def test_group_aggregate_without_keys(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a INT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
min_add = udaf(lambda a, b, c: a.min() + b.min() + c.min(),
result_type=DataTypes.INT(), func_type="pandas")
t.select(min_add(t.a, t.b, t.c)) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[5]"])
def test_group_aggregate_with_aux_group(self):
t = self.t_env.from_elements(
[(1, 2, 3), (3, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, b INT, c FLOAT, d INT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
        self.t_env.get_config().set('python.metric.enabled', 'true')
t.group_by(t.a) \
.select(t.a, (t.a + 1).alias("b"), (t.a + 2).alias("c")) \
.group_by(t.a, t.b) \
.select(t.a, t.b, mean_udaf(t.b), call("max_add", t.b, t.c, 1)) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2, 2.0, 6]", "+I[2, 3, 3.0, 8]", "+I[3, 4, 4.0, 10]"])
def test_tumble_group_window_aggregate_function(self):
from pyflink.table.window import Tumble
# create source file path
data = [
'1,2,3,2018-03-11 03:10:00',
'3,2,4,2018-03-11 03:10:00',
'2,1,2,2018-03-11 03:10:00',
'1,3,1,2018-03-11 03:40:00',
'1,8,5,2018-03-11 04:20:00',
'2,3,6,2018-03-11 03:30:00'
]
source_path = self.tempdir + '/test_tumble_group_window_aggregate_function.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
self.t_env.get_config().set(
"pipeline.time-characteristic", "EventTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
c INT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table_ddl)
t = self.t_env.from_path(source_table)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a TIMESTAMP(3),
b TIMESTAMP(3),
c FLOAT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
tumble_window = Tumble.over(lit(1).hours) \
.on(col("rowtime")) \
.alias("w")
t.window(tumble_window) \
.group_by(col("w")) \
.select(col("w").start, col("w").end, mean_udaf(t.b)) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[2018-03-11T03:00, 2018-03-11T04:00, 2.2]",
"+I[2018-03-11T04:00, 2018-03-11T05:00, 8.0]"])
def test_slide_group_window_aggregate_function(self):
from pyflink.table.window import Slide
# create source file path
data = [
'1,2,3,2018-03-11 03:10:00',
'3,2,4,2018-03-11 03:10:00',
'2,1,2,2018-03-11 03:10:00',
'1,3,1,2018-03-11 03:40:00',
'1,8,5,2018-03-11 04:20:00',
'2,3,6,2018-03-11 03:30:00'
]
source_path = self.tempdir + '/test_slide_group_window_aggregate_function.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
self.t_env.get_config().set(
"pipeline.time-characteristic", "EventTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
c INT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table_ddl)
t = self.t_env.from_path(source_table)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a TINYINT,
b TIMESTAMP(3),
c TIMESTAMP(3),
d FLOAT,
e INT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
slide_window = Slide.over(lit(1).hours) \
.every(lit(30).minutes) \
.on(col("rowtime")) \
.alias("w")
t.window(slide_window) \
.group_by(t.a, col("w")) \
.select(t.a,
col("w").start,
col("w").end,
mean_udaf(t.b),
call("max_add", t.b, t.c, 1)) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 2018-03-11T02:30, 2018-03-11T03:30, 2.0, 6]",
"+I[1, 2018-03-11T03:00, 2018-03-11T04:00, 2.5, 7]",
"+I[1, 2018-03-11T03:30, 2018-03-11T04:30, 5.5, 14]",
"+I[1, 2018-03-11T04:00, 2018-03-11T05:00, 8.0, 14]",
"+I[2, 2018-03-11T02:30, 2018-03-11T03:30, 1.0, 4]",
"+I[2, 2018-03-11T03:00, 2018-03-11T04:00, 2.0, 10]",
"+I[2, 2018-03-11T03:30, 2018-03-11T04:30, 3.0, 10]",
"+I[3, 2018-03-11T03:00, 2018-03-11T04:00, 2.0, 7]",
"+I[3, 2018-03-11T02:30, 2018-03-11T03:30, 2.0, 7]"])
def test_over_window_aggregate_function(self):
import datetime
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a TINYINT,
b FLOAT,
c INT,
d FLOAT,
e FLOAT,
f FLOAT,
g FLOAT,
h FLOAT,
i FLOAT,
j FLOAT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
self.t_env.create_temporary_view("T_test_over_window_aggregate_function", t)
self.t_env.execute_sql(f"""
insert into {sink_table}
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN UNBOUNDED preceding AND UNBOUNDED FOLLOWING),
max_add(b, c)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN UNBOUNDED preceding AND 0 FOLLOWING),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND 0 FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW),
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND UNBOUNDED FOLLOWING),
mean_udaf(c)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
from T_test_over_window_aggregate_function
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 4.3333335, 5, 4.3333335, 3.0, 3.0, 2.5, 4.3333335, 3.0, 2.0]",
"+I[1, 4.3333335, 13, 5.5, 3.0, 3.0, 4.3333335, 8.0, 5.0, 5.0]",
"+I[1, 4.3333335, 6, 4.3333335, 2.0, 3.0, 2.5, 4.3333335, 3.0, 2.0]",
"+I[2, 2.0, 9, 2.0, 4.0, 4.0, 2.0, 2.0, 4.0, 4.0]",
"+I[2, 2.0, 3, 2.0, 2.0, 4.0, 1.0, 2.0, 4.0, 2.0]",
"+I[3, 2.0, 3, 2.0, 1.0, 1.0, 2.0, 2.0, 1.0, 1.0]"])
class StreamPandasUDAFITTests(PyFlinkStreamTableTestCase):
@classmethod
def setUpClass(cls):
super(StreamPandasUDAFITTests, cls).setUpClass()
cls.t_env.create_temporary_system_function("mean_udaf", mean_udaf)
max_add_min_udaf = udaf(lambda a: a.max() + a.min(),
result_type='SMALLINT',
func_type='pandas')
cls.t_env.create_temporary_system_function("max_add_min_udaf", max_add_min_udaf)
def test_sliding_group_window_over_time(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_sliding_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Slide
self.t_env.get_config().set(
"pipeline.time-characteristic", "EventTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
c SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table_ddl)
t = self.t_env.from_path(source_table)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, b TIMESTAMP(3), c TIMESTAMP(3), d FLOAT)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Slide.over(lit(1).hours)
.every(lit(30).minutes)
.on(col("rowtime"))
.alias("w")) \
.group_by(t.a, t.b, col("w")) \
.select(t.a, col("w").start, col("w").end, mean_udaf(t.c).alias("b")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 2018-03-11T02:30, 2018-03-11T03:30, 2.0]",
"+I[1, 2018-03-11T03:00, 2018-03-11T04:00, 2.5]",
"+I[1, 2018-03-11T03:30, 2018-03-11T04:30, 5.5]",
"+I[1, 2018-03-11T04:00, 2018-03-11T05:00, 8.0]",
"+I[2, 2018-03-11T02:30, 2018-03-11T03:30, 1.0]",
"+I[2, 2018-03-11T03:00, 2018-03-11T04:00, 2.0]",
"+I[2, 2018-03-11T03:30, 2018-03-11T04:30, 3.0]",
"+I[3, 2018-03-11T03:00, 2018-03-11T04:00, 2.0]",
"+I[3, 2018-03-11T02:30, 2018-03-11T03:30, 2.0]"])
os.remove(source_path)
def test_sliding_group_window_over_count(self):
self.t_env.get_config().set("parallelism.default", "1")
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00',
'3,3,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_sliding_group_window_over_count.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Slide
self.t_env.get_config().set(
"pipeline.time-characteristic", "ProcessingTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
c SMALLINT,
protime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table_ddl)
t = self.t_env.from_path(source_table)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, d FLOAT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Slide.over(row_interval(2))
.every(row_interval(1))
.on(t.protime)
.alias("w")) \
.group_by(t.a, t.b, col("w")) \
.select(t.a, mean_udaf(t.c).alias("b")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2.5]", "+I[1, 5.5]", "+I[2, 2.0]", "+I[3, 2.5]"])
os.remove(source_path)
def test_tumbling_group_window_over_time(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_tumbling_group_window_over_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Tumble
self.t_env.get_config().set(
"pipeline.time-characteristic", "EventTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
c SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table_ddl)
t = self.t_env.from_path(source_table)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(
a TINYINT, b TIMESTAMP(3), c TIMESTAMP(3), d TIMESTAMP(3), e FLOAT)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Tumble.over(lit(1).hours).on(t.rowtime).alias("w")) \
.group_by(t.a, t.b, col("w")) \
.select(t.a,
col("w").start,
col("w").end,
col("w").rowtime,
mean_udaf(t.c).alias("b")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, [
"+I[1, 2018-03-11T03:00, 2018-03-11T04:00, 2018-03-11T03:59:59.999, 2.5]",
"+I[1, 2018-03-11T04:00, 2018-03-11T05:00, 2018-03-11T04:59:59.999, 8.0]",
"+I[2, 2018-03-11T03:00, 2018-03-11T04:00, 2018-03-11T03:59:59.999, 2.0]",
"+I[3, 2018-03-11T03:00, 2018-03-11T04:00, 2018-03-11T03:59:59.999, 2.0]",
])
os.remove(source_path)
def test_tumbling_group_window_over_count(self):
self.t_env.get_config().set("parallelism.default", "1")
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2,2018-03-11 03:10:00',
'3,3,2,2018-03-11 03:10:00',
'2,2,1,2018-03-11 03:10:00',
'1,1,3,2018-03-11 03:40:00',
'1,1,8,2018-03-11 04:20:00',
'2,2,3,2018-03-11 03:30:00',
'3,3,3,2018-03-11 03:30:00',
'1,1,4,2018-03-11 04:20:00',
]
source_path = tmp_dir + '/test_group_window_aggregate_function_over_count.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
from pyflink.table.window import Tumble
self.t_env.get_config().set(
"pipeline.time-characteristic", "ProcessingTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
c SMALLINT,
protime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table_ddl)
t = self.t_env.from_path(source_table)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, d FLOAT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t.window(Tumble.over(row_interval(2)).on(t.protime).alias("w")) \
.group_by(t.a, t.b, col("w")) \
.select(t.a, mean_udaf(t.c).alias("b")) \
.execute_insert(sink_table) \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 2.5]", "+I[1, 6.0]", "+I[2, 2.0]", "+I[3, 2.5]"])
os.remove(source_path)
def test_row_time_over_range_window_aggregate_function(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2013-01-01 03:10:00',
'3,2,2013-01-01 03:10:00',
'2,1,2013-01-01 03:10:00',
'1,5,2013-01-01 03:10:00',
'1,8,2013-01-01 04:20:00',
'2,3,2013-01-01 03:30:00'
]
source_path = tmp_dir + '/test_over_range_window_aggregate_function.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
self.t_env.get_config().set(
"pipeline.time-characteristic", "EventTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table_ddl)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, b FLOAT, c SMALLINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
self.t_env.execute_sql(f"""
insert into {sink_table}
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW),
max_add_min_udaf(b)
over (PARTITION BY a ORDER BY rowtime
RANGE BETWEEN INTERVAL '20' MINUTE PRECEDING AND CURRENT ROW)
from {source_table}
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 3.0, 6]",
"+I[1, 3.0, 6]",
"+I[1, 8.0, 16]",
"+I[2, 1.0, 2]",
"+I[2, 2.0, 4]",
"+I[3, 2.0, 4]"])
os.remove(source_path)
def test_row_time_over_rows_window_aggregate_function(self):
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'1,1,2013-01-01 03:10:00',
'3,2,2013-01-01 03:10:00',
'2,1,2013-01-01 03:10:00',
'1,5,2013-01-01 03:10:00',
'1,8,2013-01-01 04:20:00',
'2,3,2013-01-01 03:30:00'
]
source_path = tmp_dir + '/test_over_rows_window_aggregate_function.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
self.t_env.get_config().set(
"pipeline.time-characteristic", "EventTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table_ddl)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, b FLOAT, c SMALLINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
self.t_env.execute_sql(f"""
insert into {sink_table}
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
max_add_min_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
from {source_table}
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 1.0, 2]",
"+I[1, 3.0, 6]",
"+I[1, 6.5, 13]",
"+I[2, 1.0, 2]",
"+I[2, 2.0, 4]",
"+I[3, 2.0, 4]"])
os.remove(source_path)
def test_proc_time_over_rows_window_aggregate_function(self):
# create source file path
data = [
'1,1,2013-01-01 03:10:00',
'3,2,2013-01-01 03:10:00',
'2,1,2013-01-01 03:10:00',
'1,5,2013-01-01 03:10:00',
'1,8,2013-01-01 04:20:00',
'2,3,2013-01-01 03:30:00'
]
source_path = self.tempdir + '/test_proc_time_over_rows_window_aggregate_function.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
self.t_env.get_config().set("parallelism.default", "1")
self.t_env.get_config().set(
"pipeline.time-characteristic", "ProcessingTime")
source_table = generate_random_table_name()
source_table_ddl = f"""
create table {source_table}(
a TINYINT,
b SMALLINT,
proctime as PROCTIME()
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{source_path}',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
"""
self.t_env.execute_sql(source_table_ddl)
sink_table = generate_random_table_name()
sink_table_ddl = f"""
CREATE TABLE {sink_table}(a TINYINT, b FLOAT, c SMALLINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
self.t_env.execute_sql(f"""
insert into {sink_table}
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY proctime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
max_add_min_udaf(b)
over (PARTITION BY a ORDER BY proctime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
from {source_table}
""").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 1.0, 2]",
"+I[1, 3.0, 6]",
"+I[1, 6.5, 13]",
"+I[2, 1.0, 2]",
"+I[2, 2.0, 4]",
"+I[3, 2.0, 4]"])
def test_execute_over_aggregate_from_json_plan(self):
# create source file path
tmp_dir = self.tempdir
data = [
'1,1,2013-01-01 03:10:00',
'3,2,2013-01-01 03:10:00',
'2,1,2013-01-01 03:10:00',
'1,5,2013-01-01 03:10:00',
'1,8,2013-01-01 04:20:00',
'2,3,2013-01-01 03:30:00'
]
source_path = tmp_dir + '/test_execute_over_aggregate_from_json_plan.csv'
sink_path = tmp_dir + '/test_execute_over_aggregate_from_json_plan'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table = generate_random_table_name()
source_table_ddl = f"""
CREATE TABLE {source_table} (
a TINYINT,
b SMALLINT,
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) WITH (
'connector' = 'filesystem',
'path' = '{source_path}',
'format' = 'csv'
)
"""
self.t_env.execute_sql(source_table_ddl)
sink_table = generate_random_table_name()
self.t_env.execute_sql(f"""
CREATE TABLE {sink_table} (
a TINYINT,
b FLOAT,
c SMALLINT
) WITH (
'connector' = 'filesystem',
'path' = '{sink_path}',
'format' = 'csv'
)
""")
self.t_env.get_config().set(
"pipeline.time-characteristic", "EventTime")
json_plan = self.t_env._j_tenv.compilePlanSql(f"""
insert into {sink_table}
select a,
mean_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW),
max_add_min_udaf(b)
over (PARTITION BY a ORDER BY rowtime
ROWS BETWEEN 1 PRECEDING AND CURRENT ROW)
from {source_table}
""")
from py4j.java_gateway import get_method
get_method(json_plan.execute(), "await")()
import glob
lines = [line.strip() for file in glob.glob(sink_path + '/*') for line in open(file, 'r')]
lines.sort()
self.assertEqual(lines, ['1,1.0,2', '1,3.0,6', '1,6.5,13', '2,1.0,2', '2,2.0,4', '3,2.0,4'])
@udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
def mean_udaf(v):
return v.mean()
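# A general AggregateFunction operating on pandas batches: accumulate() apparently receives
# each argument as a pandas Series and stores the sum of the per-column maxima, while
# get_value() returns the first stored value and bumps a custom metric counter registered
# in open(). How this class is registered as a udaf is not shown in this excerpt.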
class MaxAdd(AggregateFunction):
def __init__(self):
self.counter = None
self.counter_sum = 0
def open(self, function_context):
mg = function_context.get_metric_group()
self.counter = mg.add_group("key", "value").counter("my_counter")
self.counter_sum = 0
def get_value(self, accumulator):
# counter
self.counter.inc(10)
self.counter_sum += 10
return accumulator[0]
def create_accumulator(self):
return []
def accumulate(self, accumulator, *args):
result = 0
for arg in args:
result += arg.max()
accumulator.append(result)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 35,865 | 38.806881 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_expressions_completeness.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
from pyflink.table import expressions
class ExpressionsCompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :mod:`pyflink.table.expressions` is consistent with
Java `org.apache.flink.table.api.Expressions`.
"""
@classmethod
def python_class(cls):
return expressions
@classmethod
def java_class(cls):
return "org.apache.flink.table.api.Expressions"
@classmethod
def java_method_name(cls, python_method_name):
return {'and_': 'and',
'or_': 'or',
'not_': 'not',
'range_': 'range',
'map_': 'map'}.get(python_method_name, python_method_name)
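# 'and', 'or' and 'not' are Python keywords and 'range'/'map' shadow built-ins, so the
# Python API uses a trailing underscore; this mapping translates the names back to their
# Java counterparts for the completeness check.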
@classmethod
def excluded_methods(cls):
return {
'$'
}
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,120 | 34.35 | 90 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_expression.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.table import DataTypes
from pyflink.table.expression import TimeIntervalUnit, TimePointUnit, JsonExistsOnError, \
JsonValueOnEmptyOrError, JsonType, JsonQueryWrapper, JsonQueryOnEmptyOrError
from pyflink.table.expressions import (col, lit, range_, and_, or_, current_date,
current_time, current_timestamp, current_database,
local_timestamp, local_time, temporal_overlaps, date_format,
timestamp_diff, array, row, map_, row_interval, pi, e,
rand, rand_integer, atan2, negative, concat, concat_ws, uuid,
null_of, log, if_then_else, with_columns, call,
to_timestamp_ltz, from_unixtime, to_date, to_timestamp,
convert_tz, unix_timestamp)
from pyflink.testing.test_case_utils import PyFlinkTestCase
class PyFlinkBatchExpressionTests(PyFlinkTestCase):
def test_expression(self):
expr1 = col('a')
expr2 = col('b')
expr3 = col('c')
expr4 = col('d')
expr5 = lit(10)
# comparison functions
self.assertEqual('equals(a, b)', str(expr1 == expr2))
self.assertEqual('mod(2, b)', str(2 % expr2))
self.assertEqual('notEquals(a, b)', str(expr1 != expr2))
self.assertEqual('lessThan(a, b)', str(expr1 < expr2))
self.assertEqual('lessThanOrEqual(a, b)', str(expr1 <= expr2))
self.assertEqual('greaterThan(a, b)', str(expr1 > expr2))
self.assertEqual('greaterThanOrEqual(a, b)', str(expr1 >= expr2))
# logic functions
self.assertEqual('and(a, b)', str(expr1 & expr2))
self.assertEqual('or(a, b)', str(expr1 | expr2))
self.assertEqual('isNotTrue(a)', str(expr1.is_not_true))
self.assertEqual('isNotTrue(a)', str(~expr1))
# arithmetic functions
self.assertEqual('plus(a, b)', str(expr1 + expr2))
self.assertEqual('plus(2, b)', str(2 + expr2))
self.assertEqual('plus(cast(b, DATE), 2)', str(expr2.to_date + 2))
self.assertEqual('minus(a, b)', str(expr1 - expr2))
self.assertEqual('minus(cast(b, DATE), 2)', str(expr2.to_date - 2))
self.assertEqual('times(a, b)', str(expr1 * expr2))
self.assertEqual('divide(a, b)', str(expr1 / expr2))
self.assertEqual('mod(a, b)', str(expr1 % expr2))
self.assertEqual('power(a, b)', str(expr1 ** expr2))
self.assertEqual('minusPrefix(a)', str(-expr1))
self.assertEqual('exp(a)', str(expr1.exp))
self.assertEqual('log10(a)', str(expr1.log10))
self.assertEqual('log2(a)', str(expr1.log2))
self.assertEqual('ln(a)', str(expr1.ln))
self.assertEqual('log(a)', str(expr1.log()))
self.assertEqual('cosh(a)', str(expr1.cosh))
self.assertEqual('sinh(a)', str(expr1.sinh))
self.assertEqual('sin(a)', str(expr1.sin))
self.assertEqual('cos(a)', str(expr1.cos))
self.assertEqual('tan(a)', str(expr1.tan))
self.assertEqual('cot(a)', str(expr1.cot))
self.assertEqual('asin(a)', str(expr1.asin))
self.assertEqual('acos(a)', str(expr1.acos))
self.assertEqual('atan(a)', str(expr1.atan))
self.assertEqual('tanh(a)', str(expr1.tanh))
self.assertEqual('degrees(a)', str(expr1.degrees))
self.assertEqual('radians(a)', str(expr1.radians))
self.assertEqual('sqrt(a)', str(expr1.sqrt))
self.assertEqual('abs(a)', str(expr1.abs))
self.assertEqual('abs(a)', str(abs(expr1)))
self.assertEqual('sign(a)', str(expr1.sign))
self.assertEqual('round(a, b)', str(expr1.round(expr2)))
self.assertEqual('between(a, b, c)', str(expr1.between(expr2, expr3)))
self.assertEqual('notBetween(a, b, c)', str(expr1.not_between(expr2, expr3)))
self.assertEqual('ifThenElse(a, b, c)', str(expr1.then(expr2, expr3)))
self.assertEqual('isNull(a)', str(expr1.is_null))
self.assertEqual('isNotNull(a)', str(expr1.is_not_null))
self.assertEqual('isTrue(a)', str(expr1.is_true))
self.assertEqual('isFalse(a)', str(expr1.is_false))
self.assertEqual('isNotTrue(a)', str(expr1.is_not_true))
self.assertEqual('isNotFalse(a)', str(expr1.is_not_false))
self.assertEqual('distinct(a)', str(expr1.distinct))
self.assertEqual('sum(a)', str(expr1.sum))
self.assertEqual('sum0(a)', str(expr1.sum0))
self.assertEqual('min(a)', str(expr1.min))
self.assertEqual('max(a)', str(expr1.max))
self.assertEqual('count(a)', str(expr1.count))
self.assertEqual('avg(a)', str(expr1.avg))
self.assertEqual('first_value(a)', str(expr1.first_value))
self.assertEqual('last_value(a)', str(expr1.last_value))
self.assertEqual("listAgg(a, ',')", str(expr1.list_agg(",")))
self.assertEqual('stddevPop(a)', str(expr1.stddev_pop))
self.assertEqual('stddevSamp(a)', str(expr1.stddev_samp))
self.assertEqual('varPop(a)', str(expr1.var_pop))
self.assertEqual('varSamp(a)', str(expr1.var_samp))
self.assertEqual('collect(a)', str(expr1.collect))
self.assertEqual("as(a, 'a', 'b', 'c')", str(expr1.alias('a', 'b', 'c')))
self.assertEqual('cast(a, INT)', str(expr1.cast(DataTypes.INT())))
self.assertEqual('asc(a)', str(expr1.asc))
self.assertEqual('desc(a)', str(expr1.desc))
self.assertEqual('in(a, b, c, d)', str(expr1.in_(expr2, expr3, expr4)))
self.assertEqual('start(a)', str(expr1.start))
self.assertEqual('end(a)', str(expr1.end))
self.assertEqual('bin(a)', str(expr1.bin))
self.assertEqual('hex(a)', str(expr1.hex))
self.assertEqual('truncate(a, 3)', str(expr1.truncate(3)))
# string functions
self.assertEqual('substring(a, b)', str(expr1.substring(expr2)))
self.assertEqual('substring(a, b, 3)', str(expr1.substring(expr2, 3)))
self.assertEqual('substr(a, b)', str(expr1.substr(expr2)))
self.assertEqual('substr(a, b, 3)', str(expr1.substr(expr2, 3)))
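# The string form of trim is trim(<removeLeading>, <removeTrailing>, <trimCharacter>, <input>):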
self.assertEqual("trim(true, false, ' ', a)", str(expr1.trim_leading()))
self.assertEqual("trim(false, true, ' ', a)", str(expr1.trim_trailing()))
self.assertEqual("trim(true, true, ' ', a)", str(expr1.trim()))
self.assertEqual('replace(a, b, c)', str(expr1.replace(expr2, expr3)))
self.assertEqual('charLength(a)', str(expr1.char_length))
self.assertEqual('upper(a)', str(expr1.upper_case))
self.assertEqual('lower(a)', str(expr1.lower_case))
self.assertEqual('initCap(a)', str(expr1.init_cap))
self.assertEqual("like(a, 'Jo_n%')", str(expr1.like('Jo_n%')))
self.assertEqual("similar(a, 'A+')", str(expr1.similar('A+')))
self.assertEqual('position(a, b)', str(expr1.position(expr2)))
self.assertEqual('lpad(a, 4, b)', str(expr1.lpad(4, expr2)))
self.assertEqual('rpad(a, 4, b)', str(expr1.rpad(4, expr2)))
self.assertEqual('overlay(a, b, 6, 2)', str(expr1.overlay(expr2, 6, 2)))
self.assertEqual("regexp(a, b)", str(expr1.regexp(expr2)))
self.assertEqual("regexpReplace(a, b, 'abc')", str(expr1.regexp_replace(expr2, 'abc')))
self.assertEqual('regexpExtract(a, b, 3)', str(expr1.regexp_extract(expr2, 3)))
self.assertEqual('fromBase64(a)', str(expr1.from_base64))
self.assertEqual('toBase64(a)', str(expr1.to_base64))
self.assertEqual('ascii(a)', str(expr1.ascii))
self.assertEqual('chr(a)', str(expr1.chr))
self.assertEqual("decode(a, 'utf-8')", str(expr1.decode('utf-8')))
self.assertEqual("encode(a, 'utf-8')", str(expr1.encode('utf-8')))
self.assertEqual('left(a, 2)', str(expr1.left(2)))
self.assertEqual('right(a, 2)', str(expr1.right(2)))
self.assertEqual('instr(a, b)', str(expr1.instr(expr2)))
self.assertEqual('locate(a, b)', str(expr1.locate(expr2)))
self.assertEqual('locate(a, b, 2)', str(expr1.locate(expr2, 2)))
self.assertEqual('parseUrl(a, b)', str(expr1.parse_url(expr2)))
self.assertEqual("parseUrl(a, b, 'query')", str(expr1.parse_url(expr2, 'query')))
self.assertEqual('ltrim(a)', str(expr1.ltrim))
self.assertEqual('rtrim(a)', str(expr1.rtrim))
self.assertEqual('repeat(a, 3)', str(expr1.repeat(3)))
self.assertEqual("over(a, 'w')", str(expr1.over('w')))
self.assertEqual('reverse(a)', str(expr1.reverse))
self.assertEqual("splitIndex(a, ',', 3)", str(expr1.split_index(',', 3)))
self.assertEqual("strToMap(a)", str(expr1.str_to_map()))
self.assertEqual("strToMap(a, ';', ':')", str(expr1.str_to_map(';', ':')))
# temporal functions
self.assertEqual('cast(a, DATE)', str(expr1.to_date))
self.assertEqual('cast(a, TIME(0))', str(expr1.to_time))
self.assertEqual('cast(a, TIMESTAMP(3))', str(expr1.to_timestamp))
self.assertEqual('extract(YEAR, a)', str(expr1.extract(TimeIntervalUnit.YEAR)))
self.assertEqual('floor(a, YEAR)', str(expr1.floor(TimeIntervalUnit.YEAR)))
self.assertEqual('ceil(a)', str(expr1.ceil()))
# advanced type helper functions
self.assertEqual("get(a, 'col')", str(expr1.get('col')))
self.assertEqual('flatten(a)', str(expr1.flatten))
self.assertEqual('at(a, 0)', str(expr1.at(0)))
self.assertEqual('cardinality(a)', str(expr1.cardinality))
self.assertEqual('element(a)', str(expr1.element))
# time definition functions
self.assertEqual('rowtime(a)', str(expr1.rowtime))
self.assertEqual('proctime(a)', str(expr1.proctime))
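# Interval literals are normalized in their string form: year/quarter/month intervals are
# rendered as a month count (10 years -> 120 months), while week/day/hour/minute/second
# intervals are rendered in milliseconds (10 weeks -> 6048000000 ms).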
self.assertEqual('120', str(expr5.year))
self.assertEqual('120', str(expr5.years))
self.assertEqual('30', str(expr5.quarter))
self.assertEqual('30', str(expr5.quarters))
self.assertEqual('10', str(expr5.month))
self.assertEqual('10', str(expr5.months))
self.assertEqual('6048000000', str(expr5.week))
self.assertEqual('6048000000', str(expr5.weeks))
self.assertEqual('864000000', str(expr5.day))
self.assertEqual('864000000', str(expr5.days))
self.assertEqual('36000000', str(expr5.hour))
self.assertEqual('36000000', str(expr5.hours))
self.assertEqual('600000', str(expr5.minute))
self.assertEqual('600000', str(expr5.minutes))
self.assertEqual('10000', str(expr5.second))
self.assertEqual('10000', str(expr5.seconds))
self.assertEqual('10', str(expr5.milli))
self.assertEqual('10', str(expr5.millis))
# hash functions
self.assertEqual('md5(a)', str(expr1.md5))
self.assertEqual('sha1(a)', str(expr1.sha1))
self.assertEqual('sha224(a)', str(expr1.sha224))
self.assertEqual('sha256(a)', str(expr1.sha256))
self.assertEqual('sha384(a)', str(expr1.sha384))
self.assertEqual('sha512(a)', str(expr1.sha512))
self.assertEqual('sha2(a, 224)', str(expr1.sha2(224)))
# json functions
self.assertEqual("IS_JSON('42')", str(lit('42').is_json()))
self.assertEqual("IS_JSON('42', SCALAR)", str(lit('42').is_json(JsonType.SCALAR)))
self.assertEqual("JSON_EXISTS('{}', '$.x')", str(lit('{}').json_exists('$.x')))
self.assertEqual("JSON_EXISTS('{}', '$.x', FALSE)",
str(lit('{}').json_exists('$.x', JsonExistsOnError.FALSE)))
self.assertEqual("JSON_VALUE('{}', '$.x', STRING, NULL, null, NULL, null)",
str(lit('{}').json_value('$.x')))
self.assertEqual("JSON_VALUE('{}', '$.x', INT, DEFAULT, 42, ERROR, null)",
str(lit('{}').json_value('$.x', DataTypes.INT(),
JsonValueOnEmptyOrError.DEFAULT, 42,
JsonValueOnEmptyOrError.ERROR, None)))
self.assertEqual("JSON_QUERY('{}', '$.x', WITHOUT_ARRAY, NULL, EMPTY_ARRAY)",
str(lit('{}').json_query('$.x', JsonQueryWrapper.WITHOUT_ARRAY,
JsonQueryOnEmptyOrError.NULL,
JsonQueryOnEmptyOrError.EMPTY_ARRAY)))
def test_expressions(self):
expr1 = col('a')
expr2 = col('b')
expr3 = col('c')
self.assertEqual('10', str(lit(10, DataTypes.INT(False))))
self.assertEqual('rangeTo(1, 2)', str(range_(1, 2)))
self.assertEqual('and(a, b, c)', str(and_(expr1, expr2, expr3)))
self.assertEqual('or(a, b, c)', str(or_(expr1, expr2, expr3)))
from pyflink.table.expressions import UNBOUNDED_ROW, UNBOUNDED_RANGE, CURRENT_ROW, \
CURRENT_RANGE
self.assertEqual('unboundedRow()', str(UNBOUNDED_ROW))
self.assertEqual('unboundedRange()', str(UNBOUNDED_RANGE))
self.assertEqual('currentRow()', str(CURRENT_ROW))
self.assertEqual('currentRange()', str(CURRENT_RANGE))
self.assertEqual('currentDatabase()', str(current_database()))
self.assertEqual('currentDate()', str(current_date()))
self.assertEqual('currentTime()', str(current_time()))
self.assertEqual('currentTimestamp()', str(current_timestamp()))
self.assertEqual('localTime()', str(local_time()))
self.assertEqual('localTimestamp()', str(local_timestamp()))
self.assertEqual("toDate('2018-03-18')", str(to_date('2018-03-18')))
self.assertEqual("toDate('2018-03-18', 'yyyy-MM-dd')",
str(to_date('2018-03-18', 'yyyy-MM-dd')))
self.assertEqual('toTimestampLtz(123, 0)', str(to_timestamp_ltz(123, 0)))
self.assertEqual("toTimestamp('1970-01-01 08:01:40')",
str(to_timestamp('1970-01-01 08:01:40')))
self.assertEqual("toTimestamp('1970-01-01 08:01:40', 'yyyy-MM-dd HH:mm:ss')",
str(to_timestamp('1970-01-01 08:01:40', 'yyyy-MM-dd HH:mm:ss')))
self.assertEqual("temporalOverlaps(cast('2:55:00', TIME(0)), 3600000, "
"cast('3:30:00', TIME(0)), 7200000)",
str(temporal_overlaps(
lit("2:55:00").to_time,
lit(1).hours,
lit("3:30:00").to_time,
lit(2).hours)))
self.assertEqual("dateFormat(time, '%Y, %d %M')",
str(date_format(col("time"), "%Y, %d %M")))
self.assertEqual("timestampDiff(DAY, cast('2016-06-15', DATE), cast('2016-06-18', DATE))",
str(timestamp_diff(
TimePointUnit.DAY,
lit("2016-06-15").to_date,
lit("2016-06-18").to_date)))
self.assertEqual("convertTz('2018-03-14 11:00:00', 'UTC', 'Asia/Shanghai')",
str(convert_tz("2018-03-14 11:00:00", "UTC", "Asia/Shanghai")))
self.assertEqual("fromUnixtime(1)", str(from_unixtime(1)))
self.assertEqual("fromUnixtime(1, 'yy-MM-dd HH-mm-ss')",
str(from_unixtime(1, 'yy-MM-dd HH-mm-ss')))
self.assertEqual("unixTimestamp()", str(unix_timestamp()))
self.assertEqual("unixTimestamp('2015-07-24 10:00:00')",
str(unix_timestamp('2015-07-24 10:00:00')))
self.assertEqual("unixTimestamp('2015-07-24 10:00:00', 'yy-MM-dd HH-mm-ss')",
str(unix_timestamp('2015-07-24 10:00:00', 'yy-MM-dd HH-mm-ss')))
self.assertEqual('array(1, 2, 3)', str(array(1, 2, 3)))
self.assertEqual("row('key1', 1)", str(row("key1", 1)))
self.assertEqual("map('key1', 1, 'key2', 2, 'key3', 3)",
str(map_("key1", 1, "key2", 2, "key3", 3)))
self.assertEqual('4', str(row_interval(4)))
self.assertEqual('pi()', str(pi()))
self.assertEqual('e()', str(e()))
self.assertEqual('rand(4)', str(rand(4)))
self.assertEqual('randInteger(4)', str(rand_integer(4)))
self.assertEqual('atan2(1, 2)', str(atan2(1, 2)))
self.assertEqual('minusPrefix(a)', str(negative(expr1)))
self.assertEqual('concat(a, b, c)', str(concat(expr1, expr2, expr3)))
self.assertEqual("concat_ws(', ', b, c)", str(concat_ws(', ', expr2, expr3)))
self.assertEqual('uuid()', str(uuid()))
self.assertEqual('null', str(null_of(DataTypes.BIGINT())))
self.assertEqual('log(a)', str(log(expr1)))
self.assertEqual('ifThenElse(a, b, c)', str(if_then_else(expr1, expr2, expr3)))
self.assertEqual('withColumns(a, b, c)', str(with_columns(expr1, expr2, expr3)))
self.assertEqual('a.b.c(a)', str(call('a.b.c', expr1)))
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 18,163 | 55.409938 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_expression_completeness.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
from pyflink.table import Expression
class ExpressionCompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`Expression` is consistent with
Java `org.apache.flink.table.api.ApiExpression`.
"""
@classmethod
def python_class(cls):
return Expression
@classmethod
def java_class(cls):
return "org.apache.flink.table.api.ApiExpression"
@classmethod
def excluded_methods(cls):
return {
'asSummaryString',
'accept',
'toExpr',
'getChildren',
# The following methods have been replaced with Python's built-in operator methods,
# e.g. __and__ for 'and', to be more Pythonic.
'and',
'or',
'not',
'isGreater',
'isGreaterOrEqual',
'isLess',
'isLessOrEqual',
'isEqual',
'isNotEqual',
'plus',
'minus',
'dividedBy',
'times',
'mod',
'power'
}
@classmethod
def java_method_name(cls, python_method_name):
return {'alias': 'as', 'in_': 'in'}.get(python_method_name, python_method_name)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,569 | 32.376623 | 91 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_calc.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import array
import datetime
from decimal import Decimal
from pyflink.common import Row
from pyflink.table import DataTypes
from pyflink.table.expressions import row
from pyflink.table.tests.test_types import PythonOnlyPoint, PythonOnlyUDT
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class StreamTableCalcTests(PyFlinkStreamTableTestCase):
def test_select(self):
t = self.t_env.from_elements([(1, 'hi', 'hello')], ['a', 'b', 'c'])
result = t.select(t.a + 1, t.b, t.c)
query_operation = result._j_table.getQueryOperation()
self.assertEqual('[plus(a, 1), b, c]',
query_operation.getProjectList().toString())
def test_alias(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
t = t.alias("d", "e", "f")
result = t.select(t.d, t.e, t.f)
resolved_schema = result._j_table.getQueryOperation().getResolvedSchema()
self.assertEqual(['d', 'e', 'f'], list(resolved_schema.getColumnNames()))
def test_where(self):
t_env = self.t_env
t = t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.where((t.a > 1) & (t.b == 'Hello'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual("and("
"greaterThan(a, 1), "
"equals(b, 'Hello'))",
query_operation.getCondition().toString())
def test_filter(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.filter((t.a > 1) & (t.b == 'Hello'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual("and("
"greaterThan(a, 1), "
"equals(b, 'Hello'))",
query_operation.getCondition().toString())
def test_from_element(self):
t_env = self.t_env
field_names = ["a", "b", "c", "d", "e", "f", "g", "h",
"i", "j", "k", "l", "m", "n", "o", "p", "q"]
field_types = [DataTypes.BIGINT(), DataTypes.DOUBLE(), DataTypes.STRING(),
DataTypes.STRING(), DataTypes.DATE(),
DataTypes.TIME(),
DataTypes.TIMESTAMP(3),
DataTypes.INTERVAL(DataTypes.SECOND(3)),
DataTypes.ARRAY(DataTypes.DOUBLE()),
DataTypes.ARRAY(DataTypes.DOUBLE(False)),
DataTypes.ARRAY(DataTypes.STRING()),
DataTypes.ARRAY(DataTypes.DATE()),
DataTypes.DECIMAL(38, 18),
DataTypes.ROW([DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.DOUBLE())]),
DataTypes.MAP(DataTypes.STRING(), DataTypes.DOUBLE()),
DataTypes.BYTES(), PythonOnlyUDT()]
schema = DataTypes.ROW(
list(map(lambda field_name, field_type: DataTypes.FIELD(field_name, field_type),
field_names,
field_types)))
sink_table_ddl = """
CREATE TABLE Results(
a BIGINT,
b DOUBLE,
c STRING,
d STRING,
e DATE,
f TIME,
g TIMESTAMP(3),
h INT,
i ARRAY<DOUBLE>,
j ARRAY<DOUBLE NOT NULL>,
k ARRAY<STRING>,
l ARRAY<DATE>,
m DECIMAL(38, 18),
n ROW<a BIGINT, b DOUBLE>,
o MAP<STRING, DOUBLE>,
p BYTES,
q ARRAY<DOUBLE NOT NULL>)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = t_env.from_elements(
[(1, 1.0, "hi", "hello", datetime.date(1970, 1, 2), datetime.time(1, 0, 0),
datetime.datetime(1970, 1, 2, 0, 0),
datetime.timedelta(days=1, microseconds=10),
[1.0, None], array.array("d", [1.0, 2.0]),
["abc"], [datetime.date(1970, 1, 2)], Decimal(1), Row("a", "b")(1, 2.0),
{"key": 1.0}, bytearray(b'ABCD'), PythonOnlyPoint(3.0, 4.0))],
schema)
t.execute_insert("Results").wait()
actual = source_sink_utils.results()
expected = ['+I[1, 1.0, hi, hello, 1970-01-02, 01:00, 1970-01-02T00:00, '
'86400, [1.0, null], [1.0, 2.0], [abc], [1970-01-02], '
'1.000000000000000000, +I[1, 2.0], {key=1.0}, [65, 66, 67, 68], [3.0, 4.0]]']
self.assert_equals(actual, expected)
def test_from_element_expression(self):
t_env = self.t_env
field_names = ["a", "b", "c"]
field_types = [DataTypes.BIGINT(), DataTypes.STRING(), DataTypes.FLOAT()]
schema = DataTypes.ROW(
list(map(lambda field_name, field_type: DataTypes.FIELD(field_name, field_type),
field_names,
field_types)))
sink_table_ddl = """
CREATE TABLE Results_test_from_element_expression(a BIGINT, b STRING, c FLOAT)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = t_env.from_elements([row(1, 'abc', 2.0), row(2, 'def', 3.0)], schema)
t.execute_insert("Results_test_from_element_expression").wait()
actual = source_sink_utils.results()
expected = ['+I[1, abc, 2.0]', '+I[2, def, 3.0]']
self.assert_equals(actual, expected)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 6,845 | 41.259259 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_table_descriptor.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.config_options import ConfigOptions
from pyflink.table import DataTypes
from pyflink.table.schema import Schema
from pyflink.table.table_descriptor import TableDescriptor, FormatDescriptor
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class TableDescriptorTest(PyFlinkStreamTableTestCase):
def setUp(self):
super(TableDescriptorTest, self).setUp()
self.option_a = ConfigOptions.key("a").boolean_type().no_default_value()
self.option_b = ConfigOptions.key("b").int_type().no_default_value()
self.key_format = ConfigOptions.key("key.format").string_type().no_default_value()
def test_basic(self):
schema = Schema.new_builder() \
.column("f0", DataTypes.STRING()) \
.column("f1", DataTypes.BIGINT()) \
.primary_key("f0") \
.build()
descriptor = TableDescriptor.for_connector("test-connector") \
.schema(schema) \
.partitioned_by("f0") \
.comment("Test Comment") \
.build()
self.assertIsNotNone(descriptor.get_schema())
self.assertEqual(1, len(descriptor.get_partition_keys()))
self.assertEqual("f0", descriptor.get_partition_keys()[0])
self.assertEqual(1, len(descriptor.get_options()))
self.assertEqual("test-connector", descriptor.get_options().get("connector"))
self.assertEqual("Test Comment", descriptor.get_comment())
def test_no_schema(self):
descriptor = TableDescriptor.for_connector("test-connector").build()
self.assertIsNone(descriptor.get_schema())
def test_options(self):
descriptor = TableDescriptor.for_connector("test-connector") \
.schema(Schema.new_builder().build()) \
.option(self.option_a, False) \
.option(self.option_b, 42) \
.option("c", "C") \
.build()
self.assertEqual(4, len(descriptor.get_options()))
self.assertEqual("test-connector", descriptor.get_options().get("connector"))
self.assertEqual("false", descriptor.get_options().get("a"))
self.assertEqual("42", descriptor.get_options().get("b"))
self.assertEqual("C", descriptor.get_options().get("c"))
def test_format_basic(self):
descriptor = TableDescriptor.for_connector("test-connector") \
.schema(Schema.new_builder().build()) \
.format("json") \
.build()
self.assertEqual(2, len(descriptor.get_options()))
self.assertEqual("test-connector", descriptor.get_options().get("connector"))
self.assertEqual("json", descriptor.get_options().get("format"))
def test_format_with_format_descriptor(self):
descriptor = TableDescriptor.for_connector("test-connector") \
.schema(Schema.new_builder().build()) \
.format(FormatDescriptor.for_format("test-format")
.option(self.option_a, True)
.option(self.option_b, 42)
.option("c", "C")
.build(),
self.key_format) \
.build()
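# When a key-format option is supplied, the nested format's own options are expected to be
# prefixed with 'key.<format-name>.', as the assertions below verify.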
self.assertEqual(5, len(descriptor.get_options()))
self.assertEqual("test-connector", descriptor.get_options().get("connector"))
self.assertEqual("test-format", descriptor.get_options().get("key.format"))
self.assertEqual("true", descriptor.get_options().get("key.test-format.a"))
self.assertEqual("42", descriptor.get_options().get("key.test-format.b"))
self.assertEqual("C", descriptor.get_options().get("key.test-format.c"))
def test_to_string(self):
schema = Schema.new_builder().column("f0", DataTypes.STRING()).build()
format_descriptor = FormatDescriptor \
.for_format("test-format") \
.option(self.option_a, False) \
.build()
table_descriptor = TableDescriptor.for_connector("test-connector") \
.schema(schema) \
.partitioned_by("f0") \
.option(self.option_a, True) \
.format(format_descriptor) \
.comment("Test Comment") \
.build()
self.assertEqual("test-format[{a=false}]", str(format_descriptor))
self.assertEqual("""(
`f0` STRING
)
COMMENT 'Test Comment'
PARTITIONED BY (`f0`)
WITH (
'a' = 'true',
'connector' = 'test-connector',
'test-format.a' = 'false',
'format' = 'test-format'
)""", str(table_descriptor))
def test_execute_insert_to_table_descriptor(self):
schema = Schema.new_builder() \
.column("f0", DataTypes.STRING()) \
.build()
table = self.t_env.from_descriptor(TableDescriptor
.for_connector("datagen")
.option("number-of-rows", '10')
.schema(schema)
.build())
table_result = table.execute_insert(TableDescriptor
.for_connector("blackhole")
.schema(schema)
.build())
table_result.collect()
def test_statement_set_insert_using_table_descriptor(self):
schema = Schema.new_builder() \
.column("f0", DataTypes.INT()) \
.build()
source_descriptor = TableDescriptor.for_connector("datagen") \
.schema(schema) \
.option("number-of-rows", '10') \
.build()
sink_descriptor = TableDescriptor.for_connector("blackhole") \
.schema(schema) \
.build()
self.t_env.create_temporary_table("T", source_descriptor)
stmt_set = self.t_env.create_statement_set()
stmt_set.add_insert(sink_descriptor, self.t_env.from_path("T"))
stmt_set.execute().wait()
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 7,160 | 40.633721 | 90 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_table_config.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
from pyflink.common import Configuration
from pyflink.table import TableConfig, SqlDialect
from pyflink.testing.test_case_utils import PyFlinkTestCase
class TableConfigTests(PyFlinkTestCase):
def test_get_set_idle_state_retention_time(self):
table_config = TableConfig.get_default()
table_config.set_idle_state_retention_time(
datetime.timedelta(days=1), datetime.timedelta(days=2))
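# The deprecated two-argument setter apparently retains only the minimum retention; the
# maximum reported back is derived as 1.5x the minimum, which is what the assertions
# below check (1.5 days and 1 day respectively).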
self.assertEqual(3 * 24 * 3600 * 1000 / 2, table_config.get_max_idle_state_retention_time())
self.assertEqual(24 * 3600 * 1000, table_config.get_min_idle_state_retention_time())
def test_get_set_idle_state_retention(self):
table_config = TableConfig.get_default()
table_config.set_idle_state_retention(datetime.timedelta(days=1))
self.assertEqual(datetime.timedelta(days=1), table_config.get_idle_state_retention())
def test_get_set_local_timezone(self):
table_config = TableConfig.get_default()
table_config.set_local_timezone("Asia/Shanghai")
timezone = table_config.get_local_timezone()
self.assertEqual(timezone, "Asia/Shanghai")
def test_get_set_max_generated_code_length(self):
table_config = TableConfig.get_default()
table_config.set_max_generated_code_length(32000)
max_generated_code_length = table_config.get_max_generated_code_length()
self.assertEqual(max_generated_code_length, 32000)
def test_get_configuration(self):
table_config = TableConfig.get_default()
table_config.set("k1", "v1")
self.assertEqual(table_config.get("k1", ""), "v1")
def test_add_configuration(self):
table_config = TableConfig.get_default()
configuration = Configuration()
configuration.set_string("k1", "v1")
table_config.add_configuration(configuration)
self.assertEqual(table_config.get("k1", ""), "v1")
def test_get_set_sql_dialect(self):
table_config = TableConfig.get_default()
sql_dialect = table_config.get_sql_dialect()
self.assertEqual(sql_dialect, SqlDialect.DEFAULT)
table_config.set_sql_dialect(SqlDialect.HIVE)
sql_dialect = table_config.get_sql_dialect()
self.assertEqual(sql_dialect, SqlDialect.HIVE)
| 3,258 | 38.26506 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_dependency.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import shutil
import sys
import unittest
import uuid
from pyflink.table import DataTypes, TableEnvironment, EnvironmentSettings
from pyflink.table import expressions as expr
from pyflink.table.udf import udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import (PyFlinkTestCase)
class DependencyTests(object):
def test_add_python_file(self):
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n raise Exception('This function should not be called!')")
self.t_env.add_python_file(python_file_path)
python_file_dir_with_higher_priority = os.path.join(
self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir_with_higher_priority)
python_file_path_higher_priority = os.path.join(python_file_dir_with_higher_priority,
"test_dependency_manage_lib.py")
with open(python_file_path_higher_priority, 'w') as f:
f.write("def add_two(a):\n return a + 2")
self.t_env.add_python_file(python_file_path_higher_priority)
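# Both directories ship a test_dependency_manage_lib module; the file added last is
# expected to take precedence on the worker's import path, so add_two below must resolve
# to the working implementation rather than the one that raises.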
def plus_two(i):
from test_dependency_manage_lib import add_two
return add_two(i)
self.t_env.create_temporary_system_function(
"add_two", udf(plus_two, DataTypes.BIGINT(), DataTypes.BIGINT()))
sink_table_ddl = """
CREATE TABLE Results(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select(expr.call("add_two", t.a), t.a).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3, 1]", "+I[4, 2]", "+I[5, 3]"])
def test_add_python_archive(self):
tmp_dir = self.tempdir
archive_dir_path = os.path.join(tmp_dir, "archive_" + str(uuid.uuid4()))
os.mkdir(archive_dir_path)
with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f:
f.write("2")
archive_file_path = \
shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path)
self.t_env.add_python_archive(archive_file_path, "data")
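# The archive is extracted on the workers under the relative directory name passed as the
# second argument ('data'), which is why the UDF below opens 'data/data.txt'.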
def add_from_file(i):
with open("data/data.txt", 'r') as f:
return i + int(f.read())
self.t_env.create_temporary_system_function("add_from_file",
udf(add_from_file, DataTypes.BIGINT(),
DataTypes.BIGINT()))
sink_table_ddl = """
CREATE TABLE Results(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select(expr.call('add_from_file', t.a), t.a).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3, 1]", "+I[4, 2]", "+I[5, 3]"])
class EmbeddedThreadDependencyTests(DependencyTests, PyFlinkTestCase):
def setUp(self):
super(EmbeddedThreadDependencyTests, self).setUp()
self.t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
self.t_env.get_config().set("parallelism.default", "2")
self.t_env.get_config().set("python.fn-execution.bundle.size", "1")
self.t_env.get_config().set("python.execution-mode", "thread")
class BatchDependencyTests(DependencyTests, PyFlinkTestCase):
def setUp(self) -> None:
super(BatchDependencyTests, self).setUp()
self.t_env = TableEnvironment.create(EnvironmentSettings.in_batch_mode())
self.t_env.get_config().set("parallelism.default", "2")
self.t_env.get_config().set("python.fn-execution.bundle.size", "1")
class StreamDependencyTests(DependencyTests, PyFlinkTestCase):
def setUp(self):
super(StreamDependencyTests, self).setUp()
origin_execution_mode = os.environ.get('_python_worker_execution_mode')
os.environ['_python_worker_execution_mode'] = "loopback"
try:
self.t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
self.t_env.get_config().set("parallelism.default", "2")
self.t_env.get_config().set("python.fn-execution.bundle.size", "1")
finally:
if origin_execution_mode is not None:
os.environ['_python_worker_execution_mode'] = origin_execution_mode
def test_set_requirements_without_cached_directory(self):
requirements_txt_path = os.path.join(self.tempdir, str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("cloudpickle==2.2.0")
self.t_env.set_python_requirements(requirements_txt_path)
def check_requirements(i):
import cloudpickle # noqa # pylint: disable=unused-import
assert '_PYTHON_REQUIREMENTS_INSTALL_DIR' in os.environ
return i
self.t_env.create_temporary_system_function(
"check_requirements",
udf(check_requirements, DataTypes.BIGINT(), DataTypes.BIGINT()))
sink_table_ddl = """
CREATE TABLE Results(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select(expr.call('check_requirements', t.a), t.a).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 1]", "+I[2, 2]", "+I[3, 3]"])
def test_set_requirements_with_cached_directory(self):
tmp_dir = self.tempdir
requirements_txt_path = os.path.join(tmp_dir, "requirements_txt_" + str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("python-package1==0.0.0")
requirements_dir_path = os.path.join(tmp_dir, "requirements_dir_" + str(uuid.uuid4()))
os.mkdir(requirements_dir_path)
package_file_name = "python-package1-0.0.0.tar.gz"
with open(os.path.join(requirements_dir_path, package_file_name), 'wb') as f:
import base64
# This base64 data is encoded from a python package file which includes a
# "python_package1" module. The module contains a "plus(a, b)" function.
# The base64 can be recomputed by the following code:
# base64.b64encode(open("python-package1-0.0.0.tar.gz", "rb").read()).decode("utf-8")
f.write(base64.b64decode(
"H4sICNefrV0C/2Rpc3QvcHl0aG9uLXBhY2thZ2UxLTAuMC4wLnRhcgDtmVtv2jAYhnPtX2H1CrRCY+ckI"
"XEx7axuUA11u5imyICTRc1JiVnHfv1MKKWjYxwKEdPehws7xkmUfH5f+3PyqfqWpa1cjG5EKFnLbOvfhX"
"FQTI3nOPPSdavS5Pa8nGMwy3Esi3ke9wyTObbnGNQxamBSKlFQavzUryG8ldG6frpbEGx4yNmDLMp/hPy"
"P8b+6fNN613vdP1z8XdteG3+ug/17/F3Hcw1qIv5H54NUYiyUaH2SRRllaYeytkl6IpEdujI2yH2XapCQ"
"wSRJRDHt0OveZa//uUfeZonUvUO5bHo+0ZcoVo9bMhFRvGx9H41kWj447aUsR0WUq+pui8arWKggK5Jli"
"wGOo/95q79ovXi6/nfyf246Dof/n078fT9KI+X77Xx6BP83bX4Xf5NxT7dz7toO/L8OxjKgeTwpG+KcDp"
"sdQjWFVJMipYI+o0MCk4X/t2UYtqI0yPabCHb3f861XcD/Ty/+Y5nLdCzT0dSPo/SmbKsf6un+b7KV+Ls"
"W4/D/OoC9w/930P9eGwM75//csrD+Q/6P/P/k9D/oX3988Wqw1bS/tf6tR+s/m3EG/ddBqXO9XKf15C8p"
"P9k4HZBtBgzZaVW5vrfKcj+W32W82ygEB9D/Xu9+4/qfP9L/rBv0X1v87yONKRX61/qfzwqjIDzIPTbv/"
"7or3/88i0H/tfBFW7s/s/avRInQH06ieEy7tDrQeYHUdRN7wP+n/vf62LOH/pld7f9xz7a5Pfufedy0oP"
"86iJI8KxStAq6yLC4JWdbbVbWRikR2z1ZGytk5vauW3QdnBFE6XqwmykazCesAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAOBw/AJw5CHBAFAAAA=="))
self.t_env.set_python_requirements(requirements_txt_path, requirements_dir_path)
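# Supplying the cached directory presumably lets the requirement be installed offline from
# the pre-staged package instead of being downloaded from PyPI.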
def add_one(i):
from python_package1 import plus
return plus(i, 1)
self.t_env.create_temporary_system_function(
"add_one",
udf(add_one, DataTypes.BIGINT(), DataTypes.BIGINT()))
sink_table_ddl = """
CREATE TABLE Results(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select(expr.call('add_one', t.a), t.a).execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[2, 1]", "+I[3, 2]", "+I[4, 3]"])
def test_set_environment(self):
python_exec_link_path = sys.executable
self.t_env.get_config().set_python_executable(python_exec_link_path)
def check_python_exec(i):
import os
assert os.environ["python"] == python_exec_link_path
return i
self.t_env.create_temporary_system_function(
"check_python_exec",
udf(check_python_exec, DataTypes.BIGINT(), DataTypes.BIGINT()))
def check_pyflink_gateway_disabled(i):
from pyflink.java_gateway import get_gateway
get_gateway()
return i
self.t_env.create_temporary_system_function(
"check_pyflink_gateway_disabled",
udf(check_pyflink_gateway_disabled, DataTypes.BIGINT(),
DataTypes.BIGINT()))
sink_table_ddl = """
CREATE TABLE Results(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
t = self.t_env.from_elements([(1, 2), (2, 5), (3, 1)], ['a', 'b'])
t.select(
expr.call('check_python_exec', t.a),
expr.call('check_pyflink_gateway_disabled', t.a)) \
.execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 1]", "+I[2, 2]", "+I[3, 3]"])
if __name__ == "__main__":
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 11,429 | 45.653061 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_table_schema.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table.types import DataTypes
from pyflink.table.table_schema import TableSchema
from pyflink.testing.test_case_utils import PyFlinkTestCase
class TableSchemaTests(PyFlinkTestCase):
def test_init(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
self.assertEqual(3, schema.get_field_count())
self.assertEqual(["a", "b", "c"], schema.get_field_names())
self.assertEqual([DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()],
schema.get_field_data_types())
def test_copy(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
copied_schema = schema.copy()
self.assertEqual(schema, copied_schema)
copied_schema._j_table_schema = None
self.assertNotEqual(schema, copied_schema)
def test_get_field_data_types(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
types = schema.get_field_data_types()
self.assertEqual([DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()], types)
def test_get_field_data_type(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
type_by_name = schema.get_field_data_type("b")
type_by_index = schema.get_field_data_type(2)
type_by_name_not_exist = schema.get_field_data_type("d")
type_by_index_not_exist = schema.get_field_data_type(6)
with self.assertRaises(TypeError):
schema.get_field_data_type(None)
self.assertEqual(DataTypes.BIGINT(), type_by_name)
self.assertEqual(DataTypes.STRING(), type_by_index)
self.assertIsNone(type_by_name_not_exist)
self.assertIsNone(type_by_index_not_exist)
def test_get_field_count(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
count = schema.get_field_count()
self.assertEqual(3, count)
def test_get_field_names(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
names = schema.get_field_names()
self.assertEqual(["a", "b", "c"], names)
def test_get_field_name(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
field_name = schema.get_field_name(2)
field_name_not_exist = schema.get_field_name(3)
self.assertEqual("c", field_name)
self.assertIsNone(field_name_not_exist)
def test_to_row_data_type(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
row_type = schema.to_row_data_type()
expected = DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.STRING())],
nullable=False)
self.assertEqual(expected, row_type)
def test_hash(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
schema2 = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
self.assertEqual(hash(schema2), hash(schema))
def test_str(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
expected = "root\n |-- a: INT\n |-- b: BIGINT\n |-- c: STRING\n"
self.assertEqual(expected, str(schema))
def test_repr(self):
schema = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
expected = "root\n |-- a: INT\n |-- b: BIGINT\n |-- c: STRING\n"
self.assertEqual(expected, repr(schema))
def test_builder(self):
schema_builder = TableSchema.builder()
schema = schema_builder \
.field("a", DataTypes.INT())\
.field("b", DataTypes.BIGINT())\
.field("c", DataTypes.STRING()).build()
expected = TableSchema(["a", "b", "c"],
[DataTypes.INT(), DataTypes.BIGINT(), DataTypes.STRING()])
self.assertEqual(expected, schema)
| 5,722 | 39.878571 | 90 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_column_operation.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class StreamTableColumnsOperationTests(PyFlinkStreamTableTestCase):
def test_add_columns(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.select(t.a).add_columns((t.a + 1).alias('b'), (t.a + 2).alias('c'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('[a, plus(a, 1), '
'plus(a, 2)]',
query_operation.getProjectList().toString())
def test_add_or_replace_columns(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.select(t.a).add_or_replace_columns((t.a + 1).alias('b'), (t.a + 2).alias('a'))
query_operation = result._j_table.getQueryOperation()
self.assertEqual('[plus(a, 2), '
'plus(a, 1)]',
query_operation.getProjectList().toString())
def test_rename_columns(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.select(t.a, t.b, t.c) \
.rename_columns(t.a.alias('d'), t.c.alias('f'), t.b.alias('e'))
resolved_schema = result._j_table.getQueryOperation().getResolvedSchema()
self.assertEqual(['d', 'e', 'f'], list(resolved_schema.getColumnNames()))
def test_drop_columns(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.drop_columns(t.a, t.c)
query_operation = result._j_table.getQueryOperation()
self.assertEqual('[b]', query_operation.getProjectList().toString())
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,892 | 44.920635 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_udtf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pyflink.table import DataTypes
from pyflink.table.udf import TableFunction, udtf, ScalarFunction, udf
from pyflink.table.expressions import col
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \
PyFlinkBatchTableTestCase
class UserDefinedTableFunctionTests(object):
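    """
    Base tests for Python user-defined table functions (UDTFs), shared by the stream,
    batch and embedded-thread test cases below.
    """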
def test_table_function(self):
self.t_env.execute_sql("""
CREATE TABLE Results_test_table_function(
a BIGINT,
b BIGINT,
c BIGINT
) WITH ('connector'='test-sink')""")
multi_emit = udtf(MultiEmit(), result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()])
multi_num = udf(MultiNum(), result_type=DataTypes.BIGINT())
t = self.t_env.from_elements([(1, 1, 3), (2, 1, 6), (3, 2, 9)], ['a', 'b', 'c'])
t = t.join_lateral(multi_emit((t.a + t.a) / 2, multi_num(t.b)).alias('x', 'y'))
t = t.left_outer_join_lateral(condition_multi_emit(t.x, t.y).alias('m')) \
.select(t.x, t.y, col("m"))
t = t.left_outer_join_lateral(identity(t.m).alias('n')) \
.select(t.x, t.y, col("n"))
t.execute_insert("Results_test_table_function").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 0, null]", "+I[1, 1, null]", "+I[2, 0, null]", "+I[2, 1, null]",
"+I[3, 0, 0]", "+I[3, 0, 1]", "+I[3, 0, 2]", "+I[3, 1, 1]",
"+I[3, 1, 2]", "+I[3, 2, 2]", "+I[3, 3, null]"])
def test_table_function_with_sql_query(self):
self.t_env.execute_sql("""
CREATE TABLE Results_test_table_function_with_sql_query(
a BIGINT,
b BIGINT,
c BIGINT
) WITH ('connector'='test-sink')""")
self.t_env.create_temporary_system_function(
"multi_emit", udtf(MultiEmit(), result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()]))
t = self.t_env.from_elements([(1, 1, 3), (2, 1, 6), (3, 2, 9)], ['a', 'b', 'c'])
self.t_env.create_temporary_view("MyTable", t)
t = self.t_env.sql_query(
"SELECT a, x, y FROM MyTable LEFT JOIN LATERAL TABLE(multi_emit(a, b)) as T(x, y)"
" ON TRUE")
t.execute_insert("Results_test_table_function_with_sql_query").wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 1, 0]", "+I[2, 2, 0]", "+I[3, 3, 0]", "+I[3, 3, 1]"])
class PyFlinkStreamUserDefinedFunctionTests(UserDefinedTableFunctionTests,
PyFlinkStreamTableTestCase):
def test_execute_from_json_plan(self):
# create source file path
tmp_dir = self.tempdir
data = ['1,1', '3,2', '2,1']
source_path = tmp_dir + '/test_execute_from_json_plan_input.csv'
sink_path = tmp_dir + '/test_execute_from_json_plan_out'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
source_table = """
CREATE TABLE source_table (
a BIGINT,
b BIGINT
) WITH (
'connector' = 'filesystem',
'path' = '%s',
'format' = 'csv'
)
""" % source_path
self.t_env.execute_sql(source_table)
self.t_env.execute_sql("""
CREATE TABLE sink_table (
a BIGINT,
b BIGINT,
c BIGINT
) WITH (
'connector' = 'filesystem',
'path' = '%s',
'format' = 'csv'
)
""" % sink_path)
self.t_env.create_temporary_system_function(
"multi_emit2", udtf(MultiEmit(), result_types=[DataTypes.BIGINT(), DataTypes.BIGINT()]))
json_plan = self.t_env._j_tenv.compilePlanSql("INSERT INTO sink_table "
"SELECT a, x, y FROM source_table "
"LEFT JOIN LATERAL TABLE(multi_emit2(a, b))"
" as T(x, y)"
" ON TRUE")
from py4j.java_gateway import get_method
get_method(json_plan.execute(), "await")()
import glob
lines = [line.strip() for file in glob.glob(sink_path + '/*') for line in open(file, 'r')]
lines.sort()
self.assertEqual(lines, ['1,1,0', '2,2,0', '3,3,0', '3,3,1'])
class PyFlinkBatchUserDefinedFunctionTests(UserDefinedTableFunctionTests,
PyFlinkBatchTableTestCase):
pass
class PyFlinkEmbeddedThreadTests(UserDefinedTableFunctionTests, PyFlinkStreamTableTestCase):
def setUp(self):
super(PyFlinkEmbeddedThreadTests, self).setUp()
self.t_env.get_config().set("python.execution-mode", "thread")
class MultiEmit(TableFunction, unittest.TestCase):
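    """
    Table function that, for an input (x, y), emits (x, 0) .. (x, y - 1) and accumulates y
    into a counter.
    """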
def open(self, function_context):
self.counter_sum = 0
def eval(self, x, y):
self.counter_sum += y
for i in range(y):
yield x, i
@udtf(result_types=['bigint'])
def identity(x):
if x is not None:
from pyflink.common import Row
return Row(x)
# test specifying the input_types explicitly

@udtf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()],
result_types=DataTypes.BIGINT())
def condition_multi_emit(x, y):
if x == 3:
return range(y, x)
class MultiNum(ScalarFunction):
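    """
    Scalar function that doubles its input.
    """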
def eval(self, x):
return x * 2
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 6,876 | 36.994475 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_pandas_conversion.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
from pandas.testing import assert_frame_equal
from pyflink.common import Row
from pyflink.table.types import DataTypes
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase, \
PyFlinkStreamTableTestCase
class PandasConversionTestBase(object):
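    """
    Shared fixtures for the pandas conversion tests: sample rows covering the supported
    data types, the corresponding ROW data type and a pandas DataFrame built from them.
    """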
@classmethod
def setUpClass(cls):
super(PandasConversionTestBase, cls).setUpClass()
cls.data = [(1, 1, 1, 1, True, 1.1, 1.2, 'hello', bytearray(b"aaa"),
decimal.Decimal('1000000000000000000.01'), datetime.date(2014, 9, 13),
datetime.time(hour=1, minute=0, second=1),
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],
Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),
d=[1, 2])),
(1, 2, 2, 2, False, 2.1, 2.2, 'world', bytearray(b"bbb"),
decimal.Decimal('1000000000000000000.02'), datetime.date(2014, 9, 13),
datetime.time(hour=1, minute=0, second=1),
datetime.datetime(1970, 1, 1, 0, 0, 0, 123000), ['hello', '中文'],
Row(a=1, b='hello', c=datetime.datetime(1970, 1, 1, 0, 0, 0, 123000),
d=[1, 2]))]
cls.data_type = DataTypes.ROW(
[DataTypes.FIELD("f1", DataTypes.TINYINT()),
DataTypes.FIELD("f2", DataTypes.SMALLINT()),
DataTypes.FIELD("f3", DataTypes.INT()),
DataTypes.FIELD("f4", DataTypes.BIGINT()),
DataTypes.FIELD("f5", DataTypes.BOOLEAN()),
DataTypes.FIELD("f6", DataTypes.FLOAT()),
DataTypes.FIELD("f7", DataTypes.DOUBLE()),
DataTypes.FIELD("f8", DataTypes.STRING()),
DataTypes.FIELD("f9", DataTypes.BYTES()),
DataTypes.FIELD("f10", DataTypes.DECIMAL(38, 18)),
DataTypes.FIELD("f11", DataTypes.DATE()),
DataTypes.FIELD("f12", DataTypes.TIME()),
DataTypes.FIELD("f13", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("f14", DataTypes.ARRAY(DataTypes.STRING())),
DataTypes.FIELD("f15", DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.INT()),
DataTypes.FIELD("b", DataTypes.STRING()),
DataTypes.FIELD("c", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("d", DataTypes.ARRAY(DataTypes.INT()))]))], False)
cls.pdf = cls.create_pandas_data_frame()
@classmethod
def create_pandas_data_frame(cls):
data_dict = {}
for j, name in enumerate(cls.data_type.names):
data_dict[name] = [cls.data[i][j] for i in range(len(cls.data))]
        # need to convert to numpy types
import numpy as np
data_dict["f1"] = np.int8(data_dict["f1"])
data_dict["f2"] = np.int16(data_dict["f2"])
data_dict["f3"] = np.int32(data_dict["f3"])
data_dict["f4"] = np.int64(data_dict["f4"])
data_dict["f6"] = np.float32(data_dict["f6"])
data_dict["f7"] = np.float64(data_dict["f7"])
data_dict["f15"] = [row.as_dict() for row in data_dict["f15"]]
import pandas as pd
return pd.DataFrame(data=data_dict,
index=[2., 3.],
columns=['f1', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9',
'f10', 'f11', 'f12', 'f13', 'f14', 'f15'])
class PandasConversionTests(PandasConversionTestBase):
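    """
    Tests of from_pandas that only validate the resulting schema or the raised error.
    """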
def test_from_pandas_with_incorrect_schema(self):
fields = self.data_type.fields.copy()
fields[0], fields[7] = fields[7], fields[0] # swap str with tinyint
wrong_schema = DataTypes.ROW(fields) # should be DataTypes.STRING()
with self.assertRaisesRegex(Exception, "Expected a string.*got int8"):
self.t_env.from_pandas(self.pdf, schema=wrong_schema)
def test_from_pandas_with_names(self):
# skip decimal as currently only decimal(38, 18) is supported
pdf = self.pdf.drop(['f10', 'f11', 'f12', 'f13', 'f14', 'f15'], axis=1)
new_names = list(map(str, range(len(pdf.columns))))
table = self.t_env.from_pandas(pdf, schema=new_names)
self.assertEqual(new_names, table.get_schema().get_field_names())
table = self.t_env.from_pandas(pdf, schema=tuple(new_names))
self.assertEqual(new_names, table.get_schema().get_field_names())
def test_from_pandas_with_types(self):
new_types = self.data_type.field_types()
new_types[0] = DataTypes.BIGINT()
table = self.t_env.from_pandas(self.pdf, schema=new_types)
self.assertEqual(new_types, table.get_schema().get_field_data_types())
table = self.t_env.from_pandas(self.pdf, schema=tuple(new_types))
self.assertEqual(new_types, table.get_schema().get_field_data_types())
class PandasConversionITTests(PandasConversionTestBase):
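    """
    Integration tests that convert between pandas DataFrames and Tables end-to-end.
    """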
def test_from_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type, 5)
self.assertEqual(self.data_type, table.get_schema().to_row_data_type())
table = table.filter(table.f2 < 2)
sink_table_ddl = """
CREATE TABLE Results(
f1 TINYINT,
f2 SMALLINT,
f3 INT,
f4 BIGINT,
f5 BOOLEAN,
f6 FLOAT,
f7 DOUBLE,
f8 STRING,
f9 BYTES,
f10 DECIMAL(38, 18),
f11 DATE,
f12 TIME,
f13 TIMESTAMP(3),
f14 ARRAY<STRING>,
f15 ROW<a INT, b STRING, c TIMESTAMP(3), d ARRAY<INT>>)
WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
table.execute_insert("Results").wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[1, 1, 1, 1, true, 1.1, 1.2, hello, [97, 97, 97], "
"1000000000000000000.010000000000000000, 2014-09-13, 01:00:01, "
"1970-01-01T00:00:00.123, [hello, 中文], +I[1, hello, "
"1970-01-01T00:00:00.123, [1, 2]]]"])
def test_to_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type)
result_pdf = table.to_pandas()
result_pdf.index = self.pdf.index
self.assertEqual(2, len(result_pdf))
expected_arrow = self.pdf.to_records(index=False)
result_arrow = result_pdf.to_records(index=False)
for r in range(len(expected_arrow)):
for e in range(len(expected_arrow[r])):
self.assert_equal_field(expected_arrow[r][e], result_arrow[r][e])
def test_empty_to_pandas(self):
table = self.t_env.from_pandas(self.pdf, self.data_type)
pdf = table.filter(table.f1 < 0).to_pandas()
self.assertTrue(pdf.empty)
def test_to_pandas_for_retract_table(self):
table = self.t_env.from_pandas(self.pdf, self.data_type)
result_pdf = table.group_by(table.f1).select(table.f2.max.alias('f2')).to_pandas()
import pandas as pd
import numpy as np
assert_frame_equal(result_pdf, pd.DataFrame(data={'f2': np.int16([2])}))
result_pdf = table.group_by(table.f2).select(table.f1.max.alias('f2')).to_pandas()
assert_frame_equal(result_pdf, pd.DataFrame(data={'f2': np.int8([1, 1])}))
def assert_equal_field(self, expected_field, result_field):
import numpy as np
result_type = type(result_field)
if result_type == dict:
self.assertEqual(expected_field.keys(), result_field.keys())
for key in expected_field:
self.assert_equal_field(expected_field[key], result_field[key])
elif result_type == np.ndarray:
self.assertTrue((expected_field == result_field).all())
else:
self.assertTrue(expected_field == result_field)
class BatchPandasConversionTests(PandasConversionTests,
PandasConversionITTests,
PyFlinkBatchTableTestCase):
pass
class StreamPandasConversionTests(PandasConversionITTests,
PyFlinkStreamTableTestCase):
def test_to_pandas_with_event_time(self):
self.t_env.get_config().set("parallelism.default", "1")
# create source file path
import tempfile
import os
tmp_dir = tempfile.gettempdir()
data = [
'2018-03-11 03:10:00',
'2018-03-11 03:10:00',
'2018-03-11 03:10:00',
'2018-03-11 03:40:00',
'2018-03-11 04:20:00',
'2018-03-11 03:30:00'
]
source_path = tmp_dir + '/test_to_pandas_with_event_time.csv'
with open(source_path, 'w') as fd:
for ele in data:
fd.write(ele + '\n')
self.t_env.get_config().set(
"pipeline.time-characteristic", "EventTime")
source_table = """
create table source_table(
rowtime TIMESTAMP(3),
WATERMARK FOR rowtime AS rowtime - INTERVAL '60' MINUTE
) with(
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '%s',
'format.ignore-first-line' = 'false',
'format.field-delimiter' = ','
)
""" % source_path
self.t_env.execute_sql(source_table)
t = self.t_env.from_path("source_table")
result_pdf = t.to_pandas()
import pandas as pd
os.remove(source_path)
assert_frame_equal(result_pdf, pd.DataFrame(
data={"rowtime": [
datetime.datetime(2018, 3, 11, 3, 10),
datetime.datetime(2018, 3, 11, 3, 10),
datetime.datetime(2018, 3, 11, 3, 10),
datetime.datetime(2018, 3, 11, 3, 40),
datetime.datetime(2018, 3, 11, 4, 20),
datetime.datetime(2018, 3, 11, 3, 30),
]}))
| 11,096 | 43.745968 | 92 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_environment_settings_completeness.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table import EnvironmentSettings
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
class EnvironmentSettingsCompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`EnvironmentSettings` is consistent with
Java `org.apache.flink.table.api.EnvironmentSettings`.
"""
@classmethod
def python_class(cls):
return EnvironmentSettings
@classmethod
def java_class(cls):
return "org.apache.flink.table.api.EnvironmentSettings"
@classmethod
def excluded_methods(cls):
# internal interfaces, no need to expose to users.
return {'getPlanner', 'getExecutor', 'getUserClassLoader'}
class EnvironmentSettingsBuilderCompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`EnvironmentSettings.Builder` is consistent with
Java `org.apache.flink.table.api.EnvironmentSettings$Builder`.
"""
@classmethod
def python_class(cls):
return EnvironmentSettings.Builder
@classmethod
def java_class(cls):
return "org.apache.flink.table.api.EnvironmentSettings$Builder"
@classmethod
def excluded_methods(cls):
# internal interfaces, no need to expose to users.
return {'withClassLoader'}
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,581 | 35.366197 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_environment_settings.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.common import Configuration
from pyflink.table import EnvironmentSettings
from pyflink.testing.test_case_utils import PyFlinkTestCase
class EnvironmentSettingsTests(PyFlinkTestCase):
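    """
    Tests for EnvironmentSettings: mode selection, built-in catalog/database names and
    conversion to/from Configuration.
    """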
def test_mode_selection(self):
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertTrue(environment_settings.is_streaming_mode())
# test in_streaming_mode
environment_settings = builder.in_streaming_mode().build()
self.assertTrue(environment_settings.is_streaming_mode())
environment_settings = EnvironmentSettings.in_streaming_mode()
self.assertTrue(environment_settings.is_streaming_mode())
# test in_batch_mode
environment_settings = builder.in_batch_mode().build()
self.assertFalse(environment_settings.is_streaming_mode())
environment_settings = EnvironmentSettings.in_batch_mode()
self.assertFalse(environment_settings.is_streaming_mode())
def test_with_built_in_catalog_name(self):
gateway = get_gateway()
DEFAULT_BUILTIN_CATALOG = gateway.jvm.TableConfigOptions.TABLE_CATALOG_NAME.defaultValue()
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertEqual(environment_settings.get_built_in_catalog_name(), DEFAULT_BUILTIN_CATALOG)
environment_settings = builder.with_built_in_catalog_name("my_catalog").build()
self.assertEqual(environment_settings.get_built_in_catalog_name(), "my_catalog")
def test_with_built_in_database_name(self):
gateway = get_gateway()
DEFAULT_BUILTIN_DATABASE = gateway.jvm.TableConfigOptions.TABLE_DATABASE_NAME.defaultValue()
builder = EnvironmentSettings.new_instance()
# test the default behaviour to make sure it is consistent with the python doc
environment_settings = builder.build()
self.assertEqual(environment_settings.get_built_in_database_name(),
DEFAULT_BUILTIN_DATABASE)
environment_settings = builder.with_built_in_database_name("my_database").build()
self.assertEqual(environment_settings.get_built_in_database_name(), "my_database")
def test_to_configuration(self):
expected_settings = EnvironmentSettings.new_instance().in_batch_mode().build()
config = expected_settings.get_configuration()
self.assertEqual("BATCH", config.get_string("execution.runtime-mode", "stream"))
def test_from_configuration(self):
config = Configuration()
config.set_string("execution.runtime-mode", "batch")
actual_setting = EnvironmentSettings.new_instance().with_configuration(config).build()
self.assertFalse(actual_setting.is_streaming_mode(), "Use batch mode.")
| 4,009 | 39.918367 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_sql.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import glob
import os
import subprocess
from pyflink.find_flink_home import _find_flink_source_root
from pyflink.java_gateway import get_gateway
from pyflink.table import ResultKind, ExplainDetail
from pyflink.table import expressions as expr
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \
PyFlinkTestCase
class StreamSqlTests(PyFlinkStreamTableTestCase):
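    """
    Tests for SQL DDL and DML execution on a streaming TableEnvironment.
    """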
def test_sql_ddl(self):
self.t_env.execute_sql("create temporary function func1 as "
"'pyflink.table.tests.test_udf.add' language python")
table = self.t_env.from_elements([(1, 2)]) \
.alias("a", "b") \
.select(expr.call("func1", expr.col("a"), expr.col("b")))
plan = table.explain()
self.assertGreaterEqual(plan.find("== Optimized Physical Plan =="), 0)
self.assertGreaterEqual(plan.find("PythonCalc(select=[func1(f0, f1) AS _c0])"), 0)
plan = table.explain(ExplainDetail.PLAN_ADVICE)
self.assertGreaterEqual(plan.find("== Optimized Physical Plan With Advice =="), 0)
self.assertGreaterEqual(plan.find("No available advice..."), 0)
def test_sql_query(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
sink_table_ddl = """
CREATE TABLE sinks_sql_query(a BIGINT, b STRING, c STRING) WITH ('connector'='test-sink')
"""
t_env.execute_sql(sink_table_ddl)
result = t_env.sql_query("select a + 1, b, c from %s" % source)
result.execute_insert("sinks_sql_query").wait()
actual = source_sink_utils.results()
expected = ['+I[2, Hi, Hello]', '+I[3, Hello, Hello]']
self.assert_equals(actual, expected)
def test_execute_sql(self):
t_env = self.t_env
table_result = t_env.execute_sql("create table tbl"
"("
" a bigint,"
" b int,"
" c varchar"
") with ("
" 'connector' = 'COLLECTION',"
" 'is-bounded' = 'false'"
")")
self.assertIsNone(table_result.get_job_client())
self.assert_equals(table_result.get_table_schema().get_field_names(), ["result"])
self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS)
table_result.print()
table_result = t_env.execute_sql("alter table tbl set ('k1' = 'a', 'k2' = 'b')")
self.assertIsNone(table_result.get_job_client())
self.assert_equals(table_result.get_table_schema().get_field_names(), ["result"])
self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS)
table_result.print()
sink_table_ddl = """
CREATE TABLE sinks(k1 BIGINT, k2 INT, c STRING) WITH ('connector'='test-sink')
"""
t_env.execute_sql(sink_table_ddl)
table_result = t_env.execute_sql("insert into sinks select * from tbl")
job_execution_result = table_result.get_job_client().get_job_execution_result().result()
self.assertIsNotNone(job_execution_result.get_job_id())
self.assert_equals(table_result.get_table_schema().get_field_names(),
["default_catalog.default_database.sinks"])
self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS_WITH_CONTENT)
table_result.print()
table_result = t_env.execute_sql("drop table tbl")
self.assertIsNone(table_result.get_job_client())
self.assert_equals(table_result.get_table_schema().get_field_names(), ["result"])
self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS)
table_result.print()
class JavaSqlTests(PyFlinkTestCase):
"""
    These Java tests are started from the Python process to make sure that the Python
    environment is available while the tests are running.
"""
@staticmethod
def get_classpath():
return get_gateway().jvm.System.getProperties().get("java.class.path")
@staticmethod
def get_java_executable():
return get_gateway().jvm.System.getProperty("java.home") + "/bin/java"
def get_jar_path(self, jar_path_pattern):
test_jar_path = glob.glob(os.path.join(_find_flink_source_root(), jar_path_pattern))
if not test_jar_path:
self.fail("'%s' is not available. Please compile the test jars first."
% jar_path_pattern)
if len(test_jar_path) > 1:
self.fail("There are multiple jars matches the pattern: %s, the jars are: %s"
% (jar_path_pattern, test_jar_path))
return test_jar_path[0]
def test_java_sql_ddl(self):
test_class = "org.apache.flink.client.python.PythonFunctionFactoryTest"
test_jar_pattern = "flink-python/target/artifacts/testJavaDdl.jar"
test_jar_path = self.get_jar_path(test_jar_pattern)
test_classpath = self.get_classpath() + os.pathsep + test_jar_path
java_executable = self.get_java_executable()
subprocess.check_output([java_executable,
"-XX:+IgnoreUnrecognizedVMOptions",
"--add-opens=java.base/java.lang=ALL-UNNAMED",
"-cp", test_classpath, test_class], shell=False)
| 6,557 | 46.521739 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_table_completeness.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
from pyflink.table import Table
class TableAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`Table` is consistent with
Java `org.apache.flink.table.api.Table`.
"""
@classmethod
def python_class(cls):
return Table
@classmethod
def java_class(cls):
return "org.apache.flink.table.api.Table"
@classmethod
def excluded_methods(cls):
        # The getSchema method returns a TableSchema, but the implementation of TableSchema
        # requires a complete type system, which does not exist currently. It will be
        # implemented after FLINK-12408 is merged, so we exclude this method for the time being.
        # Methods related to FLINK-25986 are also excluded.
return {'createTemporalTableFunction', 'getQueryOperation', 'getResolvedSchema',
'insertInto', 'printExplain'}
@classmethod
def java_method_name(cls, python_method_name):
"""
        Because 'as' is a Python keyword, the Python API uses 'alias'
        where the Java API uses 'as'.
        :param python_method_name: the method name in the Python API.
        :return: the corresponding method name in the Java API.
"""
return {'alias': 'as'}.get(python_method_name, python_method_name)
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,535 | 36.850746 | 94 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_catalog_completeness.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
from pyflink.table.catalog import Catalog, CatalogDatabase, CatalogBaseTable, CatalogPartition, \
CatalogFunction, CatalogColumnStatistics, CatalogPartitionSpec, ObjectPath
class CatalogAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`Catalog` is consistent with
Java `org.apache.flink.table.catalog.Catalog`.
"""
@classmethod
def python_class(cls):
return Catalog
@classmethod
def java_class(cls):
return "org.apache.flink.table.catalog.Catalog"
@classmethod
def excluded_methods(cls):
# open/close are not needed in Python API as they are used internally
return {
'open',
'close',
'getFactory',
'getTableFactory',
'getFunctionDefinitionFactory',
'listPartitionsByFilter',
'supportsManagedTable'}
class CatalogDatabaseAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`CatalogDatabase` is consistent with
Java `org.apache.flink.table.catalog.CatalogDatabase`.
"""
@classmethod
def python_class(cls):
return CatalogDatabase
@classmethod
def java_class(cls):
return "org.apache.flink.table.catalog.CatalogDatabase"
class CatalogBaseTableAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`CatalogBaseTable` is consistent with
Java `org.apache.flink.table.catalog.CatalogBaseTable`.
"""
@classmethod
def python_class(cls):
return CatalogBaseTable
@classmethod
def java_class(cls):
return "org.apache.flink.table.catalog.CatalogBaseTable"
@classmethod
def excluded_methods(cls):
return {'getUnresolvedSchema', 'getTableKind'}
class CatalogFunctionAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`CatalogFunction` is consistent with
Java `org.apache.flink.table.catalog.CatalogFunction`.
"""
@classmethod
def python_class(cls):
return CatalogFunction
@classmethod
def java_class(cls):
return "org.apache.flink.table.catalog.CatalogFunction"
@classmethod
def excluded_methods(cls):
return {'getFunctionResources'}
class CatalogPartitionAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`CatalogPartition` is consistent with
Java `org.apache.flink.table.catalog.CatalogPartition`.
"""
@classmethod
def python_class(cls):
return CatalogPartition
@classmethod
def java_class(cls):
return "org.apache.flink.table.catalog.CatalogPartition"
class ObjectPathAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`ObjectPath` is consistent with
Java `org.apache.flink.table.catalog.ObjectPath`.
"""
@classmethod
def python_class(cls):
return ObjectPath
@classmethod
def java_class(cls):
return "org.apache.flink.table.catalog.ObjectPath"
class CatalogPartitionSpecAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`CatalogPartitionSpec` is consistent with
Java `org.apache.flink.table.catalog.CatalogPartitionSpec`.
"""
@classmethod
def python_class(cls):
return CatalogPartitionSpec
@classmethod
def java_class(cls):
return "org.apache.flink.table.catalog.CatalogPartitionSpec"
class CatalogColumnStatisticsAPICompletenessTests(PythonAPICompletenessTestCase, PyFlinkTestCase):
"""
Tests whether the Python :class:`CatalogColumnStatistics` is consistent with
Java `org.apache.flink.table.catalog.CatalogColumnStatistics`.
"""
@classmethod
def python_class(cls):
return CatalogColumnStatistics
@classmethod
def java_class(cls):
return "org.apache.flink.table.catalog.stats.CatalogColumnStatistics"
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 5,451 | 30.514451 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from py4j.protocol import Py4JJavaError
from pyflink.table import expressions as expr
from pyflink.table.window import Session, Slide, Tumble, Over
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, PyFlinkTestCase
class StreamTableWindowTests(PyFlinkStreamTableTestCase):
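    """
    Tests for over windows on a streaming Table.
    """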
def test_over_window(self):
t_env = self.t_env
t = t_env.from_elements([(1, 1, "Hello")], ['a', 'b', 'c'])
result = t.over_window(
Over.partition_by(t.c)
.order_by(t.a)
.preceding(expr.row_interval(2))
.following(expr.CURRENT_ROW)
.alias("w"))
self.assertRaisesRegex(
Py4JJavaError, "Ordering must be defined on a time attribute",
result.select, expr.col("b").sum.over(expr.col("w")))
class BatchTableWindowTests(PyFlinkTestCase):
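    """
    Tests for tumbling, sliding and session group windows on a batch TableEnvironment.
    """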
def setUp(self):
from pyflink.table import TableEnvironment
from pyflink.table import EnvironmentSettings
self.t_env = TableEnvironment.create(EnvironmentSettings.in_batch_mode())
def test_tumble_window(self):
t = self.t_env.from_elements([(1, 1, "Hello")], ["a", "b", "c"])
result = t.window(Tumble.over(expr.row_interval(2)).on(expr.col("a")).alias("w"))\
.group_by(expr.col('w'), expr.col('c')).select(t.b.sum)
query_operation = result._j_table.getQueryOperation().getChildren().get(0)
self.assertEqual('[c]', query_operation.getGroupingExpressions().toString())
self.assertEqual('TumbleWindow(field: [a], size: [2])',
query_operation.getGroupWindow().asSummaryString())
def test_slide_window(self):
t = self.t_env.from_elements([(1000, 1, "Hello")], ["a", "b", "c"])
result = t.window(Slide.over(expr.lit(2).seconds).every(expr.lit(1).seconds).on(t.a)
.alias("w")).group_by(expr.col('w'), expr.col('c')).select(t.b.sum)
query_operation = result._j_table.getQueryOperation().getChildren().get(0)
self.assertEqual('[c]', query_operation.getGroupingExpressions().toString())
self.assertEqual('SlideWindow(field: [a], slide: [1000], size: [2000])',
query_operation.getGroupWindow().asSummaryString())
def test_session_window(self):
t = self.t_env.from_elements([(1000, 1, "Hello")], ["a", "b", "c"])
result = t.window(Session.with_gap(expr.lit(1).seconds).on(t.a).alias("w"))\
.group_by(expr.col('w'), expr.col('c')).select(t.b.sum)
query_operation = result._j_table.getQueryOperation().getChildren().get(0)
self.assertEqual('[c]', query_operation.getGroupingExpressions().toString())
self.assertEqual('SessionWindow(field: [a], gap: [1000])',
query_operation.getGroupWindow().asSummaryString())
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 4,079 | 43.835165 | 93 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_row_based_operation.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import unittest
from pandas.testing import assert_frame_equal
from pyflink.common import Row
from pyflink.table import expressions as expr, ListView
from pyflink.table.types import DataTypes
from pyflink.table.udf import udf, udtf, udaf, AggregateFunction, TableAggregateFunction, udtaf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkBatchTableTestCase, \
PyFlinkStreamTableTestCase
class RowBasedOperationTests(object):
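    """
    Base tests for row-based operations (map and flat_map), shared by the batch and
    stream test cases below.
    """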
def test_map(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
sink_table_ddl = """
CREATE TABLE Results_test_map(a BIGINT, b BIGINT) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
func = udf(lambda x: Row(a=x + 1, b=x * x), result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())]))
func2 = udf(lambda x: Row(x.a + 1, x.b * 2), result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())]))
t.map(func(t.b)).alias("a", "b") \
.map(func(t.a)) \
.map(func2) \
.execute_insert("Results_test_map") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(
actual, ["+I[5, 18]", "+I[4, 8]", "+I[8, 72]", "+I[11, 162]", "+I[6, 32]"])
def test_map_with_pandas_udf(self):
t = self.t_env.from_elements(
[(1, Row(2, 3)), (2, Row(1, 3)), (1, Row(5, 4)), (1, Row(8, 6)), (2, Row(3, 4))],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b",
DataTypes.ROW([DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("d", DataTypes.INT())]))]))
sink_table_ddl = """
CREATE TABLE Results_test_map_with_pandas_udf(
a BIGINT,
b BIGINT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
def func(x):
import pandas as pd
res = pd.concat([x.a, x.c + x.d], axis=1)
return res
def func2(x):
return x * 2
def func3(x):
assert isinstance(x, Row)
return x
pandas_udf = udf(func,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]),
func_type='pandas')
pandas_udf_2 = udf(func2,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]),
func_type='pandas')
general_udf = udf(func3,
result_type=DataTypes.ROW(
[DataTypes.FIELD("c", DataTypes.BIGINT()),
DataTypes.FIELD("d", DataTypes.BIGINT())]))
t.map(pandas_udf).map(pandas_udf_2).map(general_udf).execute_insert(
"Results_test_map_with_pandas_udf").wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[4, 8]", "+I[2, 10]", "+I[2, 28]", "+I[2, 18]", "+I[4, 14]"])
def test_flat_map(self):
t = self.t_env.from_elements(
[(1, "2,3"), (2, "1"), (1, "5,6,7")],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.STRING())]))
sink_table_ddl = """
CREATE TABLE Results_test_flat_map(
a BIGINT, b STRING, c BIGINT, d STRING, e BIGINT, f STRING
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
@udtf(result_types=[DataTypes.INT(), DataTypes.STRING()])
def split(x):
for s in x.b.split(","):
yield x.a, s
t.flat_map(split).alias("a", "b") \
.flat_map(split).alias("a", "b") \
.join_lateral(split.alias("c", "d")) \
.left_outer_join_lateral(split.alias("e", "f")) \
.execute_insert("Results_test_flat_map") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(
actual,
["+I[1, 2, 1, 2, 1, 2]", "+I[1, 3, 1, 3, 1, 3]", "+I[2, 1, 2, 1, 2, 1]",
"+I[1, 5, 1, 5, 1, 5]", "+I[1, 6, 1, 6, 1, 6]", "+I[1, 7, 1, 7, 1, 7]"])
class BatchRowBasedOperationITTests(RowBasedOperationTests, PyFlinkBatchTableTestCase):
def test_aggregate_with_pandas_udaf(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
sink_table_ddl = """
CREATE TABLE Results_test_aggregate_with_pandas_udaf(
a TINYINT,
b FLOAT,
c INT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
pandas_udaf = udaf(lambda pd: (pd.b.mean(), pd.a.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.select(t.a, t.b) \
.group_by(t.a) \
.aggregate(pandas_udaf) \
.select(expr.col("*")) \
.execute_insert("Results_test_aggregate_with_pandas_udaf") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[1, 5.0, 1]", "+I[2, 2.0, 2]"])
def test_aggregate_with_pandas_udaf_without_keys(self):
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
sink_table_ddl = """
CREATE TABLE Results_test_aggregate_with_pandas_udaf_without_keys(
a FLOAT,
b INT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
pandas_udaf = udaf(lambda pd: Row(pd.b.mean(), pd.b.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
t.select(t.b) \
.aggregate(pandas_udaf.alias("a", "b")) \
.select(t.a, t.b) \
.execute_insert("Results_test_aggregate_with_pandas_udaf_without_keys") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual, ["+I[3.8, 8]"])
@unittest.skip("Not supported yet")
def test_window_aggregate_with_pandas_udaf(self):
import datetime
from pyflink.table.window import Tumble
t = self.t_env.from_elements(
[
(1, 2, 3, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(3, 2, 4, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(2, 1, 2, datetime.datetime(2018, 3, 11, 3, 10, 0, 0)),
(1, 3, 1, datetime.datetime(2018, 3, 11, 3, 40, 0, 0)),
(1, 8, 5, datetime.datetime(2018, 3, 11, 4, 20, 0, 0)),
(2, 3, 6, datetime.datetime(2018, 3, 11, 3, 30, 0, 0))
],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.TINYINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT()),
DataTypes.FIELD("rowtime", DataTypes.TIMESTAMP(3))]))
sink_table_ddl = """
CREATE TABLE Results_test_window_aggregate_with_pandas_udaf(
a TIMESTAMP(3),
b FLOAT,
c INT
) WITH ('connector'='test-sink')
"""
self.t_env.execute_sql(sink_table_ddl)
print(t.get_schema())
pandas_udaf = udaf(lambda pd: (pd.b.mean(), pd.b.max()),
result_type=DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.FLOAT()),
DataTypes.FIELD("b", DataTypes.INT())]),
func_type="pandas")
tumble_window = Tumble.over(expr.lit(1).hours) \
.on(expr.col("rowtime")) \
.alias("w")
t.select(t.b, t.rowtime) \
.window(tumble_window) \
.group_by(expr.col("w")) \
.aggregate(pandas_udaf.alias("d", "e")) \
.select(expr.col("w").rowtime, expr.col("d"), expr.col("e")) \
.execute_insert("Results_test_window_aggregate_with_pandas_udaf") \
.wait()
actual = source_sink_utils.results()
self.assert_equals(actual,
["+I[2018-03-11 03:59:59.999, 2.2, 3]",
"+I[2018-03-11 04:59:59.999, 8.0, 8]"])
class StreamRowBasedOperationITTests(RowBasedOperationTests, PyFlinkStreamTableTestCase):
def test_aggregate(self):
import pandas as pd
t = self.t_env.from_elements(
[(1, 2, 3), (2, 1, 3), (1, 5, 4), (1, 8, 6), (2, 3, 4)],
DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.SMALLINT()),
DataTypes.FIELD("c", DataTypes.INT())]))
function = CountAndSumAggregateFunction()
agg = udaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
result = t.group_by(t.a) \
.aggregate(agg.alias("c", "d")) \
.select(t.a, t.c, expr.col("d")) \
.to_pandas()
assert_frame_equal(result.sort_values('a').reset_index(drop=True),
pd.DataFrame([[1, 3, 15], [2, 2, 4]], columns=['a', 'c', 'd']))
def test_flat_aggregate(self):
import pandas as pd
mytop = udtaf(Top2())
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(5, 'Hi2', 'hi'),
(7, 'Hi', 'Hello'),
(2, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.select(t.a, t.c) \
.group_by(t.c) \
.flat_aggregate(mytop.alias('a')) \
.select(t.a) \
.flat_aggregate(mytop.alias("b")) \
.select(t.b) \
.to_pandas()
assert_frame_equal(result, pd.DataFrame([[7], [5]], columns=['b']))
def test_flat_aggregate_list_view(self):
import pandas as pd
my_concat = udtaf(ListViewConcatTableAggregateFunction())
self.t_env.get_config().set(
"python.fn-execution.bundle.size", "2")
# trigger the cache eviction in a bundle.
self.t_env.get_config().set(
"python.state.cache-size", "2")
t = self.t_env.from_elements([(1, 'Hi', 'Hello'),
(3, 'Hi', 'hi'),
(3, 'Hi2', 'hi'),
(3, 'Hi', 'hi'),
(2, 'Hi', 'Hello'),
(1, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(3, 'Hi2', 'Hello'),
(3, 'Hi3', 'hi'),
(2, 'Hi3', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c) \
.flat_aggregate(my_concat(t.b, ',').alias("b")) \
.select(t.b, t.c) \
.alias("a", "c")
assert_frame_equal(result.to_pandas().sort_values('c').reset_index(drop=True),
pd.DataFrame([["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
["Hi,Hi,Hi2,Hi2,Hi3", "Hello"],
["Hi,Hi2,Hi,Hi3,Hi3", "hi"],
["Hi,Hi2,Hi,Hi3,Hi3", "hi"]],
columns=['a', 'c']))
class CountAndSumAggregateFunction(AggregateFunction):
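    """
    Aggregate function that returns, per group, the number of accumulated rows and the
    sum of field 'b'.
    """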
def get_value(self, accumulator):
from pyflink.common import Row
return Row(accumulator[0], accumulator[1])
def create_accumulator(self):
from pyflink.common import Row
return Row(0, 0)
def accumulate(self, accumulator, row: Row):
accumulator[0] += 1
accumulator[1] += row.b
def retract(self, accumulator, row: Row):
accumulator[0] -= 1
accumulator[1] -= row.a
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] += other_acc[0]
accumulator[1] += other_acc[1]
def get_accumulator_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()),
DataTypes.FIELD("b", DataTypes.BIGINT())])
class Top2(TableAggregateFunction):
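    """
    Table aggregate function that emits up to the two largest accumulated values of
    field 'a'.
    """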
def emit_value(self, accumulator):
accumulator.sort()
accumulator.reverse()
size = len(accumulator)
if size > 1:
yield accumulator[0]
if size > 2:
yield accumulator[1]
def create_accumulator(self):
return []
def accumulate(self, accumulator, row: Row):
accumulator.append(row.a)
def retract(self, accumulator, row: Row):
accumulator.remove(row.a)
def get_accumulator_type(self):
return DataTypes.ARRAY(DataTypes.BIGINT())
def get_result_type(self):
return DataTypes.BIGINT()
class ListViewConcatTableAggregateFunction(TableAggregateFunction):
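    """
    Table aggregate function backed by a ListView accumulator that concatenates the
    accumulated strings with the given delimiter and emits the result twice.
    """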
def emit_value(self, accumulator):
result = accumulator[1].join(accumulator[0])
yield Row(result)
yield Row(result)
def create_accumulator(self):
return Row(ListView(), '')
def accumulate(self, accumulator, *args):
accumulator[1] = args[1]
accumulator[0].add(args[0])
def retract(self, accumulator, *args):
raise NotImplementedError
def get_accumulator_type(self):
return DataTypes.ROW([
DataTypes.FIELD("f0", DataTypes.LIST_VIEW(DataTypes.STRING())),
DataTypes.FIELD("f1", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW([DataTypes.FIELD("a", DataTypes.STRING())])
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 16,882 | 38.91253 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_distinct.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class StreamTableDistinctTests(PyFlinkStreamTableTestCase):
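    """
    Tests that Table.distinct() is translated into a DistinctQueryOperation.
    """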
def test_distinct(self):
t = self.t_env.from_elements([(1, "Hi", "Hello")], ['a', 'b', 'c'])
result = t.distinct()
query_operation = result._j_table.getQueryOperation()
self.assertEqual('DistinctQueryOperation', query_operation.getClass().getSimpleName())
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 1,649 | 39.243902 | 94 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_table_environment_api.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import sys
import unittest
from py4j.protocol import Py4JJavaError
from typing import Iterable
from pyflink.common import RowKind, WatermarkStrategy, Configuration
from pyflink.common.serializer import TypeSerializer
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import MergingWindowAssigner, TimeWindow, Trigger, TriggerResult, OutputTag
from pyflink.datastream.functions import WindowFunction, ProcessFunction
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.datastream.window import TimeWindowSerializer
from pyflink.java_gateway import get_gateway
from pyflink.table import (DataTypes, StreamTableEnvironment, EnvironmentSettings, Module,
ResultKind, ModuleEntry)
from pyflink.table.catalog import ObjectPath, CatalogBaseTable
from pyflink.table.explain_detail import ExplainDetail
from pyflink.table.expressions import col, source_watermark
from pyflink.table.table_descriptor import TableDescriptor
from pyflink.table.types import RowType, Row, UserDefinedType
from pyflink.table.udf import udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import (PyFlinkStreamTableTestCase, PyFlinkUTTestCase,
_load_specific_flink_module_jars)
from pyflink.util.java_utils import get_j_env_configuration
class TableEnvironmentTest(PyFlinkUTTestCase):
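    """
    Tests for general TableEnvironment behaviour: explain, function registration, module
    management and temporary table creation.
    """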
def test_set_sys_executable_for_local_mode(self):
jvm = get_gateway().jvm
actual_executable = get_j_env_configuration(self.t_env._get_j_env()) \
.getString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), None)
self.assertEqual(sys.executable, actual_executable)
def test_explain(self):
schema = RowType() \
.add('a', DataTypes.INT()) \
.add('b', DataTypes.STRING()) \
.add('c', DataTypes.STRING())
t_env = self.t_env
t = t_env.from_elements([], schema)
result = t.select(t.a + 1, t.b, t.c)
actual = result.explain()
assert isinstance(actual, str)
def test_explain_sql(self):
t_env = self.t_env
actual = t_env.explain_sql("SELECT * FROM (VALUES ('a', 1))")
assert isinstance(actual, str)
def test_explain_with_extended(self):
schema = RowType() \
.add('a', DataTypes.INT()) \
.add('b', DataTypes.STRING()) \
.add('c', DataTypes.STRING())
t_env = self.t_env
t = t_env.from_elements([], schema)
result = t.select(t.a + 1, t.b, t.c)
actual = result.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE,
ExplainDetail.JSON_EXECUTION_PLAN, ExplainDetail.PLAN_ADVICE)
assert isinstance(actual, str)
def test_explain_sql_extended(self):
t_env = self.t_env
actual = t_env.explain_sql(
"SELECT * FROM (VALUES ('a', 1))",
ExplainDetail.ESTIMATED_COST,
ExplainDetail.CHANGELOG_MODE,
ExplainDetail.JSON_EXECUTION_PLAN,
ExplainDetail.PLAN_ADVICE
)
assert isinstance(actual, str)
def test_register_functions(self):
t_env = self.t_env
t_env.create_temporary_system_function(
"python_scalar_func", udf(lambda i: i, result_type=DataTypes.INT()))
t_env.create_java_temporary_system_function(
"scalar_func", "org.apache.flink.table.utils.TestingFunctions$RichFunc0")
t_env.create_java_temporary_system_function(
"agg_func", "org.apache.flink.table.utils.TestingFunctions$ByteMaxAggFunction")
t_env.create_java_temporary_system_function(
"table_func", "org.apache.flink.table.utils.TestingFunctions$TableFunc1")
actual = t_env.list_user_defined_functions()
expected = ['python_scalar_func', 'scalar_func', 'agg_func', 'table_func']
self.assert_equals(actual, expected)
def test_load_module_twice(self):
t_env = self.t_env
self.check_list_modules('core')
self.check_list_full_modules(1, 'core')
self.assertRaisesRegex(
Py4JJavaError, "A module with name 'core' already exists",
t_env.load_module, 'core', Module(
get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
def test_unload_module_twice(self):
t_env = self.t_env
t_env.unload_module('core')
self.check_list_modules()
self.check_list_full_modules(0)
self.assertRaisesRegex(
Py4JJavaError, "No module with name 'core' exists",
t_env.unload_module, 'core')
def test_use_modules(self):
# please do not change this order since ModuleMock depends on FunctionDefinitionMock
_load_specific_flink_module_jars('/flink-table/flink-table-common')
_load_specific_flink_module_jars('/flink-table/flink-table-api-java')
t_env = self.t_env
t_env.load_module('x', Module(
get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("x")
))
t_env.load_module('y', Module(
get_gateway().jvm.org.apache.flink.table.utils.ModuleMock("y")
))
self.check_list_modules('core', 'x', 'y')
self.check_list_full_modules(3, 'core', 'x', 'y')
t_env.use_modules('y', 'core')
self.check_list_modules('y', 'core')
self.check_list_full_modules(2, 'y', 'core', 'x')
def check_list_modules(self, *expected_used_modules: str):
self.assert_equals(self.t_env.list_modules(), list(expected_used_modules))
def check_list_full_modules(self, used_module_cnt: int, *expected_loaded_modules: str):
self.assert_equals(self.t_env.list_full_modules(),
[ModuleEntry(module,
expected_loaded_modules.index(module) < used_module_cnt)
for module in expected_loaded_modules])
def test_unload_and_load_module(self):
t_env = self.t_env
t_env.unload_module('core')
t_env.load_module('core', Module(
get_gateway().jvm.org.apache.flink.table.module.CoreModule.INSTANCE))
table_result = t_env.execute_sql("select concat('unload', 'load') as test_module")
self.assertEqual(table_result.get_result_kind(), ResultKind.SUCCESS_WITH_CONTENT)
self.assert_equals(table_result.get_table_schema().get_field_names(), ['test_module'])
def test_create_and_drop_java_function(self):
t_env = self.t_env
t_env.create_java_temporary_system_function(
"scalar_func", "org.apache.flink.table.utils.TestingFunctions$RichFunc0")
t_env.create_java_function(
"agg_func", "org.apache.flink.table.utils.TestingFunctions$ByteMaxAggFunction")
t_env.create_java_temporary_function(
"table_func", "org.apache.flink.table.utils.TestingFunctions$TableFunc1")
self.assert_equals(t_env.list_user_defined_functions(),
['scalar_func', 'agg_func', 'table_func'])
t_env.drop_temporary_system_function("scalar_func")
t_env.drop_function("agg_func")
t_env.drop_temporary_function("table_func")
self.assert_equals(t_env.list_user_defined_functions(), [])
def test_create_temporary_table_from_descriptor(self):
from pyflink.table.schema import Schema
t_env = self.t_env
catalog = t_env.get_current_catalog()
database = t_env.get_current_database()
schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
t_env.create_temporary_table(
"T",
TableDescriptor.for_connector("fake")
.schema(schema)
.option("a", "Test")
.build())
self.assertFalse(t_env.get_catalog(catalog).table_exists(ObjectPath(database, "T")))
gateway = get_gateway()
catalog_table = CatalogBaseTable(
t_env._j_tenv.getCatalogManager()
.getTable(gateway.jvm.ObjectIdentifier.of(catalog, database, "T"))
.get()
.getTable())
self.assertEqual(schema, catalog_table.get_unresolved_schema())
self.assertEqual("fake", catalog_table.get_options().get("connector"))
self.assertEqual("Test", catalog_table.get_options().get("a"))
def test_create_table_from_descriptor(self):
from pyflink.table.schema import Schema
catalog = self.t_env.get_current_catalog()
database = self.t_env.get_current_database()
schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
self.t_env.create_table(
"T",
TableDescriptor.for_connector("fake")
.schema(schema)
.option("a", "Test")
.build())
object_path = ObjectPath(database, "T")
self.assertTrue(self.t_env.get_catalog(catalog).table_exists(object_path))
catalog_table = self.t_env.get_catalog(catalog).get_table(object_path)
self.assertEqual(schema, catalog_table.get_unresolved_schema())
self.assertEqual("fake", catalog_table.get_options().get("connector"))
self.assertEqual("Test", catalog_table.get_options().get("a"))
def test_table_from_descriptor(self):
from pyflink.table.schema import Schema
schema = Schema.new_builder().column("f0", DataTypes.INT()).build()
descriptor = TableDescriptor.for_connector("fake").schema(schema).build()
table = self.t_env.from_descriptor(descriptor)
self.assertEqual(schema,
Schema(Schema.new_builder()._j_builder
.fromResolvedSchema(table._j_table.getResolvedSchema()).build()))
contextResolvedTable = table._j_table.getQueryOperation().getContextResolvedTable()
options = contextResolvedTable.getTable().getOptions()
self.assertEqual("fake", options.get("connector"))
def test_udt(self):
self.t_env.from_elements([
(DenseVector([1, 2, 3, 4]), 0., 1.),
(DenseVector([2, 2, 3, 4]), 0., 2.),
(DenseVector([3, 2, 3, 4]), 0., 3.),
(DenseVector([4, 2, 3, 4]), 0., 4.),
(DenseVector([5, 2, 3, 4]), 0., 5.),
(DenseVector([11, 2, 3, 4]), 1., 1.),
(DenseVector([12, 2, 3, 4]), 1., 2.),
(DenseVector([13, 2, 3, 4]), 1., 3.),
(DenseVector([14, 2, 3, 4]), 1., 4.),
(DenseVector([15, 2, 3, 4]), 1., 5.),
],
DataTypes.ROW([
DataTypes.FIELD("features", VectorUDT()),
DataTypes.FIELD("label", DataTypes.DOUBLE()),
DataTypes.FIELD("weight", DataTypes.DOUBLE())]))
def test_explain_with_multi_sinks(self):
t_env = self.t_env
source = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hello")], ["a", "b", "c"])
t_env.execute_sql("""
CREATE TABLE sink1 (
a BIGINT,
b STRING,
c STRING
) WITH (
'connector' = 'filesystem',
'path'='path1',
'format' = 'csv'
)
""")
t_env.execute_sql("""
CREATE TABLE sink2 (
a BIGINT,
b STRING,
c STRING
) WITH (
'connector' = 'filesystem',
'path'='path2',
'format' = 'csv'
)
""")
stmt_set = t_env.create_statement_set()
stmt_set.add_insert_sql("insert into sink1 select * from %s where a > 100" % source)
stmt_set.add_insert_sql("insert into sink2 select * from %s where a < 100" % source)
actual = stmt_set.explain(ExplainDetail.ESTIMATED_COST, ExplainDetail.CHANGELOG_MODE,
ExplainDetail.JSON_EXECUTION_PLAN)
self.assertIsInstance(actual, str)
def test_register_java_function(self):
t_env = self.t_env
t_env.create_java_temporary_system_function(
"scalar_func", "org.apache.flink.table.utils.TestingFunctions$RichFunc0")
t_env.create_java_temporary_system_function(
"agg_func", "org.apache.flink.table.utils.TestingFunctions$ByteMaxAggFunction")
t_env.create_java_temporary_system_function(
"table_func", "org.apache.flink.table.utils.TestingFunctions$TableFunc1")
actual = t_env.list_user_defined_functions()
expected = ['scalar_func', 'agg_func', 'table_func']
self.assert_equals(actual, expected)
def test_use_duplicated_modules(self):
self.assertRaisesRegex(
Py4JJavaError, "Module 'core' appears more than once",
self.t_env.use_modules, 'core', 'core')
def test_use_nonexistent_module(self):
self.assertRaisesRegex(
Py4JJavaError, "No module with name 'dummy' exists",
self.t_env.use_modules, 'core', 'dummy')
class DataStreamConversionTestCases(PyFlinkUTTestCase):
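    """Tests for conversions between DataStream and Table: from_data_stream,
    to_data_stream, changelog streams, append/retract streams and side outputs."""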
def setUp(self) -> None:
from pyflink.datastream import StreamExecutionEnvironment
super(DataStreamConversionTestCases, self).setUp()
config = Configuration()
config.set_string("akka.ask.timeout", "20 s")
self.env = StreamExecutionEnvironment.get_execution_environment(config)
self.t_env = StreamTableEnvironment.create(self.env)
self.env.set_parallelism(2)
self.t_env.get_config().set(
"python.fn-execution.bundle.size", "1")
self.test_sink = DataStreamTestSinkFunction()
def test_from_data_stream_atomic(self):
data_stream = self.env.from_collection([(1,), (2,), (3,), (4,), (5,)])
result = self.t_env.from_data_stream(data_stream).execute()
self.assertEqual("""(
`f0` RAW('[B', '...')
)""",
result._j_table_result.getResolvedSchema().toString())
with result.collect() as result:
collected_result = [str(item) for item in result]
expected_result = [item for item
in map(str, [Row((1,)), Row((2,)), Row((3,)), Row((4,)), Row((5,))])]
expected_result.sort()
collected_result.sort()
self.assertEqual(expected_result, collected_result)
def test_to_data_stream_atomic(self):
table = self.t_env.from_elements([(1,), (2,), (3,)], ["a"])
ds = self.t_env.to_data_stream(table)
ds.add_sink(self.test_sink)
self.env.execute()
results = self.test_sink.get_results(False)
results.sort()
expected = ['+I[1]', '+I[2]', '+I[3]']
self.assertEqual(expected, results)
def test_to_data_stream_local_time(self):
self.t_env.execute_sql("""
CREATE TEMPORARY VIEW v0 AS
SELECT f0, f1, f2, f3 FROM ( VALUES
( 1, DATE'1970-01-02', TIME'03:04:05', TIMESTAMP'1970-01-02 03:04:05' ),
( 2, DATE'1970-06-07', TIME'08:09:10', TIMESTAMP'1970-06-07 08:09:10' )
) AS t0 ( f0, f1, f2, f3 )
""")
v0 = self.t_env.from_path("v0")
self.t_env.to_data_stream(v0).key_by(lambda r: r['f0']).add_sink(self.test_sink)
self.env.execute()
results = self.test_sink.get_results(False)
results.sort()
expected = ['+I[1, 1970-01-02, 03:04:05, 1970-01-02T03:04:05]',
'+I[2, 1970-06-07, 08:09:10, 1970-06-07T08:09:10]']
self.assertEqual(expected, results)
def test_from_data_stream(self):
self.env.set_parallelism(1)
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
type_info=Types.ROW([Types.INT(),
Types.STRING(),
Types.STRING()]))
t_env = self.t_env
table = t_env.from_data_stream(ds)
sink_table_ddl = """
CREATE TABLE Sink(a INT, b STRING, c STRING) WITH ('connector'='test-sink')
"""
t_env.execute_sql(sink_table_ddl)
expr_sink_ddl = """
CREATE TABLE ExprSink(a INT, b STRING, c STRING) WITH ('connector'='test-sink')
"""
t_env.execute_sql(expr_sink_ddl)
table.execute_insert("Sink").wait()
result = source_sink_utils.results()
expected = ['+I[1, Hi, Hello]', '+I[2, Hello, Hi]']
self.assert_equals(result, expected)
ds = ds.map(lambda x: x, Types.ROW([Types.INT(), Types.STRING(), Types.STRING()])) \
.map(lambda x: x, Types.ROW([Types.INT(), Types.STRING(), Types.STRING()]))
table = t_env.from_data_stream(ds, col('a'), col('b'), col('c'))
table.execute_insert("ExprSink").wait()
result = source_sink_utils.results()
self.assert_equals(result, expected)
def test_from_data_stream_with_schema(self):
from pyflink.table import Schema
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
type_info=Types.ROW_NAMED(
["a", "b", "c"],
[Types.INT(), Types.STRING(), Types.STRING()]))
table = self.t_env.from_data_stream(ds,
Schema.new_builder()
.column("a", DataTypes.INT())
.column("b", DataTypes.STRING())
.column("c", DataTypes.STRING())
.build())
result = table.execute()
with result.collect() as result:
collected_result = [str(item) for item in result]
expected_result = [item for item in
map(str, [Row(1, 'Hi', 'Hello'), Row(2, 'Hello', 'Hi')])]
expected_result.sort()
collected_result.sort()
self.assertEqual(expected_result, collected_result)
@unittest.skip
def test_from_and_to_data_stream_event_time(self):
from pyflink.table import Schema
ds = self.env.from_collection([(1, 42, "a"), (2, 5, "a"), (3, 1000, "c"), (100, 1000, "c")],
Types.ROW_NAMED(
["a", "b", "c"],
[Types.LONG(), Types.INT(), Types.STRING()]))
ds = ds.assign_timestamps_and_watermarks(
WatermarkStrategy.for_monotonous_timestamps()
.with_timestamp_assigner(MyTimestampAssigner()))
table = self.t_env.from_data_stream(ds,
Schema.new_builder()
.column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)")
.watermark("rowtime", "SOURCE_WATERMARK()")
.build())
self.assertEqual("""(
`a` BIGINT,
`b` INT,
`c` STRING,
`rowtime` TIMESTAMP_LTZ(3) *ROWTIME* METADATA,
WATERMARK FOR `rowtime`: TIMESTAMP_LTZ(3) AS SOURCE_WATERMARK()
)""",
table._j_table.getResolvedSchema().toString())
self.t_env.create_temporary_view("t",
ds,
Schema.new_builder()
.column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)")
.watermark("rowtime", "SOURCE_WATERMARK()")
.build())
result = self.t_env.execute_sql("SELECT "
"c, SUM(b) "
"FROM t "
"GROUP BY c, TUMBLE(rowtime, INTERVAL '0.005' SECOND)")
with result.collect() as result:
collected_result = [str(item) for item in result]
expected_result = [item for item in
map(str, [Row('a', 47), Row('c', 1000), Row('c', 1000)])]
expected_result.sort()
collected_result.sort()
self.assertEqual(expected_result, collected_result)
ds = self.t_env.to_data_stream(table)
ds.key_by(lambda k: k.c, key_type=Types.STRING()) \
.window(MyTumblingEventTimeWindow()) \
.apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute()
expected_results = ['(a,47)', '(c,1000)', '(c,1000)']
actual_results = self.test_sink.get_results(False)
expected_results.sort()
actual_results.sort()
self.assertEqual(expected_results, actual_results)
def test_from_and_to_changelog_stream_event_time(self):
from pyflink.table import Schema
self.env.set_parallelism(1)
ds = self.env.from_collection([(1, 42, "a"), (2, 5, "a"), (3, 1000, "c"), (100, 1000, "c")],
Types.ROW([Types.LONG(), Types.INT(), Types.STRING()]))
ds = ds.assign_timestamps_and_watermarks(
WatermarkStrategy.for_monotonous_timestamps()
.with_timestamp_assigner(MyTimestampAssigner()))
changelog_stream = ds.map(lambda t: Row(t.f1, t.f2),
Types.ROW([Types.INT(), Types.STRING()]))
# derive physical columns and add a rowtime
table = self.t_env.from_changelog_stream(
changelog_stream,
Schema.new_builder()
.column_by_metadata("rowtime", DataTypes.TIMESTAMP_LTZ(3))
.column_by_expression("computed", str(col("f1").upper_case))
.watermark("rowtime", str(source_watermark()))
.build())
self.t_env.create_temporary_view("t", table)
# access and reorder columns
reordered = self.t_env.sql_query("SELECT computed, rowtime, f0 FROM t")
# write out the rowtime column with fully declared schema
result = self.t_env.to_changelog_stream(
reordered,
Schema.new_builder()
.column("f1", DataTypes.STRING())
.column_by_metadata("rowtime", DataTypes.TIMESTAMP_LTZ(3))
.column_by_expression("ignored", str(col("f1").upper_case))
.column("f0", DataTypes.INT())
.build()
)
# test event time window and field access
result.key_by(lambda k: k.f1) \
.window(MyTumblingEventTimeWindow()) \
.apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute()
expected_results = ['(A,47)', '(C,1000)', '(C,1000)']
actual_results = self.test_sink.get_results(False)
expected_results.sort()
actual_results.sort()
self.assertEqual(expected_results, actual_results)
def test_to_append_stream(self):
self.env.set_parallelism(1)
t_env = StreamTableEnvironment.create(
self.env,
environment_settings=EnvironmentSettings.in_streaming_mode())
table = t_env.from_elements([(1, "Hi", "Hello"), (2, "Hello", "Hi")], ["a", "b", "c"])
new_table = table.select(table.a + 1, table.b + 'flink', table.c)
ds = t_env.to_append_stream(table=new_table, type_info=Types.ROW([Types.LONG(),
Types.STRING(),
Types.STRING()]))
test_sink = DataStreamTestSinkFunction()
ds.add_sink(test_sink)
self.env.execute("test_to_append_stream")
result = test_sink.get_results(False)
expected = ['+I[2, Hiflink, Hello]', '+I[3, Helloflink, Hi]']
self.assertEqual(result, expected)
def test_to_retract_stream(self):
self.env.set_parallelism(1)
t_env = StreamTableEnvironment.create(
self.env,
environment_settings=EnvironmentSettings.in_streaming_mode())
table = t_env.from_elements([(1, "Hi", "Hello"), (1, "Hi", "Hello")], ["a", "b", "c"])
new_table = table.group_by(table.c).select(table.a.sum, table.c.alias("b"))
ds = t_env.to_retract_stream(table=new_table, type_info=Types.ROW([Types.LONG(),
Types.STRING()]))
test_sink = DataStreamTestSinkFunction()
ds.map(lambda x: x).add_sink(test_sink)
self.env.execute("test_to_retract_stream")
result = test_sink.get_results(True)
expected = ["(True, Row(f0=1, f1='Hello'))", "(False, Row(f0=1, f1='Hello'))",
"(True, Row(f0=2, f1='Hello'))"]
self.assertEqual(result, expected)
def test_side_output_stream_to_table(self):
tag = OutputTag("side", Types.ROW([Types.INT()]))
class MyProcessFunction(ProcessFunction):
def process_element(self, value, ctx):
yield Row(value)
yield tag, Row(value * 2)
ds = self.env.from_collection([1, 2, 3], Types.INT()).process(MyProcessFunction())
ds_side = ds.get_side_output(tag)
expected = ['<Row(2)>', '<Row(4)>', '<Row(6)>']
t = self.t_env.from_data_stream(ds_side)
result = [str(i) for i in t.execute().collect()]
result.sort()
self.assertEqual(expected, result)
self.t_env.create_temporary_view("side_table", ds_side)
table_result = self.t_env.execute_sql("SELECT * FROM side_table")
result = [str(i) for i in table_result.collect()]
result.sort()
self.assertEqual(expected, result)
class StreamTableEnvironmentTests(PyFlinkStreamTableTestCase):
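    """Tests for collecting results from a streaming TableEnvironment, including
    retract messages (RowKind) and all supported data types."""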
def test_collect_with_retract(self):
expected_row_kinds = [RowKind.INSERT, RowKind.UPDATE_BEFORE, RowKind.UPDATE_AFTER,
RowKind.INSERT, RowKind.UPDATE_BEFORE, RowKind.UPDATE_AFTER]
element_data = [(1, 2, 'a'),
(3, 4, 'b'),
(5, 6, 'a'),
(7, 8, 'b')]
field_names = ['a', 'b', 'c']
source = self.t_env.from_elements(element_data, field_names)
table_result = self.t_env.execute_sql(
"SELECT SUM(a), c FROM %s group by c" % source)
with table_result.collect() as result:
collected_result = []
for i in result:
collected_result.append(i)
collected_result = [str(result) + ',' + str(result.get_row_kind())
for result in collected_result]
expected_result = [Row(1, 'a'), Row(1, 'a'), Row(6, 'a'), Row(3, 'b'),
Row(3, 'b'), Row(10, 'b')]
for i in range(len(expected_result)):
expected_result[i] = str(expected_result[i]) + ',' + str(expected_row_kinds[i])
expected_result.sort()
collected_result.sort()
self.assertEqual(expected_result, collected_result)
def test_collect_for_all_data_types(self):
expected_result = [Row(1, None, 1, True, 32767, -2147483648, 1.23,
1.98932, bytearray(b'pyflink'), 'pyflink',
datetime.date(2014, 9, 13), datetime.time(12, 0, 0, 123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000),
[Row(['[pyflink]']), Row(['[pyflink]']), Row(['[pyflink]'])],
{1: Row(['[flink]']), 2: Row(['[pyflink]'])},
decimal.Decimal('1000000000000000000.050000000000000000'),
decimal.Decimal('1000000000000000000.059999999999999999'))]
source = self.t_env.from_elements(
[(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932, bytearray(b'pyflink'), 'pyflink',
datetime.date(2014, 9, 13), datetime.time(hour=12, minute=0, second=0,
microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000),
[Row(['pyflink']), Row(['pyflink']), Row(['pyflink'])],
{1: Row(['flink']), 2: Row(['pyflink'])}, decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.05999999999999999899999999999'))], DataTypes.ROW(
[DataTypes.FIELD("a", DataTypes.BIGINT()), DataTypes.FIELD("b", DataTypes.BIGINT()),
DataTypes.FIELD("c", DataTypes.TINYINT()),
DataTypes.FIELD("d", DataTypes.BOOLEAN()),
DataTypes.FIELD("e", DataTypes.SMALLINT()),
DataTypes.FIELD("f", DataTypes.INT()),
DataTypes.FIELD("g", DataTypes.FLOAT()),
DataTypes.FIELD("h", DataTypes.DOUBLE()),
DataTypes.FIELD("i", DataTypes.BYTES()),
DataTypes.FIELD("j", DataTypes.STRING()),
DataTypes.FIELD("k", DataTypes.DATE()),
DataTypes.FIELD("l", DataTypes.TIME()),
DataTypes.FIELD("m", DataTypes.TIMESTAMP(3)),
DataTypes.FIELD("n", DataTypes.ARRAY(DataTypes.ROW([DataTypes.FIELD('ss2',
DataTypes.STRING())]))),
DataTypes.FIELD("o", DataTypes.MAP(DataTypes.BIGINT(), DataTypes.ROW(
[DataTypes.FIELD('ss', DataTypes.STRING())]))),
DataTypes.FIELD("p", DataTypes.DECIMAL(38, 18)), DataTypes.FIELD("q",
DataTypes.DECIMAL(38, 18))]))
table_result = source.execute()
with table_result.collect() as result:
collected_result = []
for i in result:
collected_result.append(i)
self.assertEqual(expected_result, collected_result)
class VectorUDT(UserDefinedType):
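    """Minimal user-defined type for DenseVector, used only by test_udt.
    A dense vector is serialized as (1, None, None, values); deserialization is
    intentionally left unimplemented for this test."""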
@classmethod
def sql_type(cls):
return DataTypes.ROW(
[
DataTypes.FIELD("type", DataTypes.TINYINT()),
DataTypes.FIELD("size", DataTypes.INT()),
DataTypes.FIELD("indices", DataTypes.ARRAY(DataTypes.INT())),
DataTypes.FIELD("values", DataTypes.ARRAY(DataTypes.DOUBLE())),
]
)
@classmethod
def module(cls):
return "pyflink.ml.core.linalg"
def serialize(self, obj):
if isinstance(obj, DenseVector):
values = [float(v) for v in obj._values]
return 1, None, None, values
else:
raise TypeError("Cannot serialize {!r} of type {!r}".format(obj, type(obj)))
def deserialize(self, datum):
pass
class DenseVector(object):
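    """Simple dense vector backed by a Python list, paired with VectorUDT above."""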
__UDT__ = VectorUDT()
def __init__(self, values):
self._values = values
def size(self) -> int:
return len(self._values)
def get(self, i: int):
return self._values[i]
def to_array(self):
return self._values
@property
def values(self):
return self._values
def __str__(self):
return "[" + ",".join([str(v) for v in self._values]) + "]"
def __repr__(self):
return "DenseVector([%s])" % (", ".join(str(i) for i in self._values))
class MyTimestampAssigner(TimestampAssigner):
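    """Uses the first field of each record as its event-time timestamp."""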
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[0])
class MyTumblingEventTimeWindow(MergingWindowAssigner[tuple, TimeWindow]):
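    """Event-time window assigner used by the conversion tests: each element is
    assigned a [timestamp, timestamp + 5) window and overlapping windows are merged."""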
def merge_windows(self,
windows,
callback: 'MergingWindowAssigner.MergeCallback[TimeWindow]') -> None:
window_list = [w for w in windows]
window_list.sort()
for i in range(1, len(window_list)):
if window_list[i - 1].end > window_list[i].start:
callback.merge([window_list[i - 1], window_list[i]],
TimeWindow(window_list[i - 1].start, window_list[i].end))
def assign_windows(self,
element: tuple,
timestamp: int,
context):
return [TimeWindow(timestamp, timestamp + 5)]
def get_default_trigger(self, env) -> Trigger[tuple, TimeWindow]:
return SimpleTimeWindowTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return True
class SimpleTimeWindowTrigger(Trigger[tuple, TimeWindow]):
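    """Fires and purges a window once the event-time watermark reaches its max timestamp."""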
def on_element(self,
element: tuple,
timestamp: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_processing_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_event_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
if time >= window.max_timestamp():
return TriggerResult.FIRE_AND_PURGE
else:
return TriggerResult.CONTINUE
def on_merge(self,
window: TimeWindow,
ctx: 'Trigger.OnMergeContext') -> None:
pass
def clear(self,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> None:
pass
class SumWindowFunction(WindowFunction[tuple, tuple, str, TimeWindow]):
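    """Sums the second field of all elements in a window and emits (key, sum)."""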
def apply(self, key: str, window: TimeWindow, inputs: Iterable[tuple]):
result = 0
for i in inputs:
result += i[1]
return [(key, result)]
| 35,152 | 42.614144 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_explain.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
from pyflink.table.explain_detail import ExplainDetail
class StreamTableExplainTests(PyFlinkStreamTableTestCase):
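    """Tests that Table.explain returns a readable plan, plan advice and a valid
    JSON execution plan."""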
def test_explain(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(t.a.sum, t.c.alias('b')).explain(
ExplainDetail.CHANGELOG_MODE, ExplainDetail.PLAN_ADVICE)
assert isinstance(result, str)
self.assertGreaterEqual(result.find('== Optimized Physical Plan With Advice =='), 0)
self.assertGreaterEqual(result.find('advice[1]: [ADVICE] You might want to enable '
'local-global two-phase optimization by configuring ('
'\'table.exec.mini-batch.enabled\' to \'true\', '
'\'table.exec.mini-batch.allow-latency\' to a '
'positive long value, \'table.exec.mini-batch.size\' '
'to a positive long value).'), 0)
result = t.group_by(t.c).select(t.a.sum, t.c.alias('b')).explain(
ExplainDetail.JSON_EXECUTION_PLAN)
assert isinstance(result, str)
try:
json.loads(result.split('== Physical Execution Plan ==')[1])
        except Exception:
self.fail('The execution plan of explain detail is not in json format.')
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 2,703 | 46.438596 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_schema_operation.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table.table_schema import TableSchema
from pyflink.table.types import DataTypes
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase
class StreamTableSchemaTests(PyFlinkStreamTableTestCase):
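    """Tests for printing and retrieving the schema of a Table."""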
def test_print_schema(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(t.a.sum, t.c.alias('b'))
result.print_schema()
def test_get_schema(self):
t = self.t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c'])
result = t.group_by(t.c).select(t.a.sum.alias('a'), t.c.alias('b'))
schema = result.get_schema()
assert schema == TableSchema(["a", "b"], [DataTypes.BIGINT(), DataTypes.STRING()])
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 1,963 | 40.787234 | 90 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_shell_example.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.testing.test_case_utils import PyFlinkTestCase
class ShellExampleTests(PyFlinkTestCase):
"""
    If these tests fail, please fix the example code here and copy it to shell.py.
"""
def test_stream_case(self):
from pyflink.shell import s_env, st_env, DataTypes
from pyflink.table.schema import Schema
from pyflink.table.table_descriptor import TableDescriptor, FormatDescriptor
# example begin
import tempfile
import os
import shutil
sink_path = tempfile.gettempdir() + '/streaming.csv'
if os.path.exists(sink_path):
if os.path.isfile(sink_path):
os.remove(sink_path)
else:
shutil.rmtree(sink_path)
s_env.set_parallelism(1)
t = st_env.from_elements([(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c'])
st_env.create_temporary_table("stream_sink", TableDescriptor.for_connector("filesystem")
.schema(Schema.new_builder()
.column("a", DataTypes.BIGINT())
.column("b", DataTypes.STRING())
.column("c", DataTypes.STRING())
.build())
.option("path", sink_path)
.format(FormatDescriptor.for_format("csv")
.option("field-delimiter", ",")
.build())
.build())
from pyflink.table.expressions import col
t.select(col('a') + 1, col('b'), col('c')).execute_insert("stream_sink").wait()
# verify code, do not copy these code to shell.py
with open(os.path.join(sink_path, os.listdir(sink_path)[0]), 'r') as f:
lines = f.read()
self.assertEqual(lines, '2,hi,hello\n' + '3,hi,hello\n')
| 3,000 | 46.634921 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/table/tests/test_catalog.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
from pyflink.table import TableSchema, DataTypes
from pyflink.table.catalog import ObjectPath, Catalog, CatalogDatabase, CatalogBaseTable, \
CatalogFunction, CatalogPartition, CatalogPartitionSpec
from pyflink.testing.test_case_utils import PyFlinkTestCase
from pyflink.util.exceptions import DatabaseNotExistException, FunctionNotExistException, \
PartitionNotExistException, TableNotExistException, DatabaseAlreadyExistException, \
FunctionAlreadyExistException, PartitionAlreadyExistsException, PartitionSpecInvalidException, \
TableNotPartitionedException, TableAlreadyExistException, DatabaseNotEmptyException
class CatalogTestBase(PyFlinkTestCase):
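    """Catalog tests against an in-memory GenericInMemoryCatalog, covering databases,
    tables, views, functions and partitions together with their exception behavior."""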
db1 = "db1"
db2 = "db2"
non_exist_database = "non-exist-db"
t1 = "t1"
t2 = "t2"
t3 = "t3"
test_catalog_name = "test-catalog"
test_comment = "test comment"
def setUp(self):
super(CatalogTestBase, self).setUp()
gateway = get_gateway()
self.catalog = Catalog(gateway.jvm.GenericInMemoryCatalog(self.test_catalog_name))
self.path1 = ObjectPath(self.db1, self.t1)
self.path2 = ObjectPath(self.db2, self.t2)
self.path3 = ObjectPath(self.db1, self.t2)
self.path4 = ObjectPath(self.db1, self.t3)
self.non_exist_db_path = ObjectPath.from_string("non.exist")
self.non_exist_object_path = ObjectPath.from_string("db1.nonexist")
def check_catalog_database_equals(self, cd1, cd2):
self.assertEqual(cd1.get_comment(), cd2.get_comment())
self.assertEqual(cd1.get_properties(), cd2.get_properties())
def check_catalog_table_equals(self, t1, t2):
self.assertEqual(t1.get_schema(), t2.get_schema())
self.assertEqual(t1.get_options(), t2.get_options())
self.assertEqual(t1.get_comment(), t2.get_comment())
def check_catalog_view_equals(self, v1, v2):
        self.assertEqual(v1.get_schema(), v2.get_schema())
self.assertEqual(v1.get_options(), v2.get_options())
self.assertEqual(v1.get_comment(), v2.get_comment())
def check_catalog_function_equals(self, f1, f2):
self.assertEqual(f1.get_class_name(), f2.get_class_name())
self.assertEqual(f1.is_generic(), f2.is_generic())
self.assertEqual(f1.get_function_language(), f2.get_function_language())
def check_catalog_partition_equals(self, p1, p2):
self.assertEqual(p1.get_properties(), p2.get_properties())
@staticmethod
def create_db():
return CatalogDatabase.create_instance({"k1": "v1"}, CatalogTestBase.test_comment)
@staticmethod
def create_another_db():
return CatalogDatabase.create_instance({"k2": "v2"}, "this is another database.")
@staticmethod
def create_table_schema():
return TableSchema(["first", "second", "third"],
[DataTypes.STRING(), DataTypes.INT(), DataTypes.STRING()])
@staticmethod
def create_another_table_schema():
return TableSchema(["first2", "second", "third"],
[DataTypes.STRING(), DataTypes.STRING(), DataTypes.STRING()])
@staticmethod
def get_batch_table_properties():
return {"is_streaming": "false"}
@staticmethod
def get_streaming_table_properties():
return {"is_streaming": "true"}
@staticmethod
def create_partition_keys():
return ["second", "third"]
@staticmethod
def create_table():
return CatalogBaseTable.create_table(
schema=CatalogTestBase.create_table_schema(),
properties=CatalogTestBase.get_batch_table_properties(),
comment=CatalogTestBase.test_comment)
@staticmethod
def create_another_table():
return CatalogBaseTable.create_table(
schema=CatalogTestBase.create_another_table_schema(),
properties=CatalogTestBase.get_batch_table_properties(),
comment=CatalogTestBase.test_comment)
@staticmethod
def create_stream_table():
return CatalogBaseTable.create_table(
schema=CatalogTestBase.create_table_schema(),
properties=CatalogTestBase.get_streaming_table_properties(),
comment=CatalogTestBase.test_comment)
@staticmethod
def create_partitioned_table():
return CatalogBaseTable.create_table(
schema=CatalogTestBase.create_table_schema(),
partition_keys=CatalogTestBase.create_partition_keys(),
properties=CatalogTestBase.get_batch_table_properties(),
comment=CatalogTestBase.test_comment)
@staticmethod
def create_another_partitioned_table():
return CatalogBaseTable.create_table(
schema=CatalogTestBase.create_another_table_schema(),
partition_keys=CatalogTestBase.create_partition_keys(),
properties=CatalogTestBase.get_batch_table_properties(),
comment=CatalogTestBase.test_comment)
@staticmethod
def create_view():
table_schema = CatalogTestBase.create_table_schema()
return CatalogBaseTable.create_view(
"select * from t1",
"select * from test-catalog.db1.t1",
table_schema,
{},
"This is a view")
@staticmethod
def create_another_view():
table_schema = CatalogTestBase.create_another_table_schema()
return CatalogBaseTable.create_view(
"select * from t2",
"select * from test-catalog.db2.t2",
table_schema,
{},
"This is another view")
@staticmethod
def create_function():
return CatalogFunction.create_instance(
"org.apache.flink.table.functions.python.PythonScalarFunction", "Java")
@staticmethod
def create_another_function():
return CatalogFunction.create_instance(
"org.apache.flink.table.functions.ScalarFunction", "Java")
@staticmethod
def create_partition_spec():
return CatalogPartitionSpec({"third": "2000", "second": "bob"})
@staticmethod
def create_another_partition_spec():
return CatalogPartitionSpec({"third": "2010", "second": "bob"})
@staticmethod
def create_partition():
return CatalogPartition.create_instance(
CatalogTestBase.get_batch_table_properties(), "catalog partition tests")
@staticmethod
def create_partition_spec_subset():
return CatalogPartitionSpec({"second": "bob"})
@staticmethod
def create_another_partition_spec_subset():
return CatalogPartitionSpec({"third": "2000"})
@staticmethod
def create_invalid_partition_spec_subset():
return CatalogPartitionSpec({"third": "2010"})
def test_create_db(self):
self.assertFalse(self.catalog.database_exists(self.db1))
catalog_db = self.create_db()
self.catalog.create_database(self.db1, catalog_db, False)
self.assertTrue(self.catalog.database_exists(self.db1))
self.check_catalog_database_equals(catalog_db, self.catalog.get_database(self.db1))
def test_create_db_database_already_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
with self.assertRaises(DatabaseAlreadyExistException):
self.catalog.create_database(self.db1, self.create_db(), False)
def test_create_db_database_already_exist_ignored(self):
catalog_db = self.create_db()
self.catalog.create_database(self.db1, catalog_db, False)
dbs = self.catalog.list_databases()
self.check_catalog_database_equals(catalog_db, self.catalog.get_database(self.db1))
self.assertEqual(2, len(dbs))
self.assertEqual({self.db1, self.catalog.get_default_database()}, set(dbs))
self.catalog.create_database(self.db1, self.create_another_db(), True)
self.check_catalog_database_equals(catalog_db, self.catalog.get_database(self.db1))
self.assertEqual(2, len(dbs))
self.assertEqual({self.db1, self.catalog.get_default_database()}, set(dbs))
def test_get_db_database_not_exist_exception(self):
with self.assertRaises(DatabaseNotExistException):
self.catalog.get_database("nonexistent")
def test_drop_db(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.assertTrue(self.catalog.database_exists(self.db1))
self.catalog.drop_database(self.db1, False)
self.assertFalse(self.catalog.database_exists(self.db1))
def test_drop_db_database_not_exist_exception(self):
with self.assertRaises(DatabaseNotExistException):
self.catalog.drop_database(self.db1, False)
def test_drop_db_database_not_exist_ignore(self):
self.catalog.drop_database(self.db1, True)
def test_drop_db_database_not_empty_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_table(), False)
with self.assertRaises(DatabaseNotEmptyException):
self.catalog.drop_database(self.db1, True)
def test_alter_db(self):
db = self.create_db()
self.catalog.create_database(self.db1, db, False)
new_db = self.create_another_db()
self.catalog.alter_database(self.db1, new_db, False)
new_properties = self.catalog.get_database(self.db1).get_properties()
old_properties = db.get_properties()
self.assertFalse(all(k in new_properties for k in old_properties.keys()))
self.check_catalog_database_equals(new_db, self.catalog.get_database(self.db1))
def test_alter_db_database_not_exist_exception(self):
with self.assertRaises(DatabaseNotExistException):
self.catalog.alter_database("nonexistent", self.create_db(), False)
def test_alter_db_database_not_exist_ignored(self):
self.catalog.alter_database("nonexistent", self.create_db(), True)
self.assertFalse(self.catalog.database_exists("nonexistent"))
def test_db_exists(self):
self.assertFalse(self.catalog.database_exists("nonexistent"))
self.catalog.create_database(self.db1, self.create_db(), False)
self.assertTrue(self.catalog.database_exists(self.db1))
def test_create_table_streaming(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_stream_table()
self.catalog.create_table(self.path1, table, False)
self.check_catalog_table_equals(table, self.catalog.get_table(self.path1))
def test_create_table_batch(self):
self.catalog.create_database(self.db1, self.create_db(), False)
        # Non-partitioned table
table = self.create_table()
self.catalog.create_table(self.path1, table, False)
table_created = self.catalog.get_table(self.path1)
self.check_catalog_table_equals(table, table_created)
self.assertEqual(self.test_comment, table_created.get_description())
tables = self.catalog.list_tables(self.db1)
self.assertEqual(1, len(tables))
self.assertEqual(self.path1.get_object_name(), tables[0])
self.catalog.drop_table(self.path1, False)
# Partitioned table
        table = self.create_partitioned_table()
self.catalog.create_table(self.path1, table, False)
self.check_catalog_table_equals(table, self.catalog.get_table(self.path1))
tables = self.catalog.list_tables(self.db1)
self.assertEqual(1, len(tables))
self.assertEqual(self.path1.get_object_name(), tables[0])
def test_create_table_database_not_exist_exception(self):
self.assertFalse(self.catalog.database_exists(self.db1))
with self.assertRaises(DatabaseNotExistException):
self.catalog.create_table(self.non_exist_object_path, self.create_table(), False)
def test_create_table_table_already_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_table(), False)
with self.assertRaises(TableAlreadyExistException):
self.catalog.create_table(self.path1, self.create_table(), False)
def test_create_table_table_already_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_table()
self.catalog.create_table(self.path1, table, False)
self.check_catalog_table_equals(table, self.catalog.get_table(self.path1))
self.catalog.create_table(self.path1, self.create_another_table(), True)
self.check_catalog_table_equals(table, self.catalog.get_table(self.path1))
def test_get_table_table_not_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
with self.assertRaises(TableNotExistException):
self.catalog.get_table(self.non_exist_object_path)
def test_get_table_table_not_exist_exception_no_db(self):
with self.assertRaises(TableNotExistException):
self.catalog.get_table(self.non_exist_object_path)
def test_drop_table_non_partitioned_table(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_table(), False)
self.assertTrue(self.catalog.table_exists(self.path1))
self.catalog.drop_table(self.path1, False)
self.assertFalse(self.catalog.table_exists(self.path1))
def test_drop_table_table_not_exist_exception(self):
with self.assertRaises(TableNotExistException):
self.catalog.drop_table(self.non_exist_db_path, False)
def test_drop_table_table_not_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.drop_table(self.non_exist_object_path, True)
def test_alter_table(self):
self.catalog.create_database(self.db1, self.create_db(), False)
        # Non-partitioned table
table = self.create_table()
self.catalog.create_table(self.path1, table, False)
self.check_catalog_table_equals(table, self.catalog.get_table(self.path1))
new_table = self.create_another_table()
self.catalog.alter_table(self.path1, new_table, False)
self.assertNotEqual(table, self.catalog.get_table(self.path1))
self.check_catalog_table_equals(new_table, self.catalog.get_table(self.path1))
self.catalog.drop_table(self.path1, False)
# Partitioned table
table = self.create_partitioned_table()
self.catalog.create_table(self.path1, table, False)
self.check_catalog_table_equals(table, self.catalog.get_table(self.path1))
new_table = self.create_another_partitioned_table()
self.catalog.alter_table(self.path1, new_table, False)
self.check_catalog_table_equals(new_table, self.catalog.get_table(self.path1))
# View
view = self.create_view()
self.catalog.create_table(self.path3, view, False)
self.check_catalog_view_equals(view, self.catalog.get_table(self.path3))
new_view = self.create_another_view()
self.catalog.alter_table(self.path3, new_view, False)
self.assertNotEqual(view, self.catalog.get_table(self.path3))
self.check_catalog_view_equals(new_view, self.catalog.get_table(self.path3))
def test_alter_table_table_not_exist_exception(self):
with self.assertRaises(TableNotExistException):
self.catalog.alter_table(self.non_exist_db_path, self.create_table(), False)
def test_alter_table_table_not_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.alter_table(self.non_exist_object_path, self.create_table(), True)
self.assertFalse(self.catalog.table_exists(self.non_exist_object_path))
def test_rename_table_non_partitioned_table(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_table()
self.catalog.create_table(self.path1, table, False)
self.check_catalog_table_equals(table, self.catalog.get_table(self.path1))
self.catalog.rename_table(self.path1, self.t2, False)
self.check_catalog_table_equals(table, self.catalog.get_table(self.path3))
self.assertFalse(self.catalog.table_exists(self.path1))
def test_rename_table_table_not_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
with self.assertRaises(TableNotExistException):
self.catalog.rename_table(self.path1, self.t2, False)
def test_rename_table_table_not_exist_exception_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.rename_table(self.path1, self.t2, True)
def test_rename_table_table_already_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_table()
self.catalog.create_table(self.path1, table, False)
self.catalog.create_table(self.path3, self.create_another_table(), False)
with self.assertRaises(TableAlreadyExistException):
self.catalog.rename_table(self.path1, self.t2, False)
def test_list_tables(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_table(), False)
self.catalog.create_table(self.path3, self.create_table(), False)
self.catalog.create_table(self.path4, self.create_view(), False)
self.assertEqual(3, len(self.catalog.list_tables(self.db1)))
self.assertEqual(1, len(self.catalog.list_views(self.db1)))
def test_table_exists(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.assertFalse(self.catalog.table_exists(self.path1))
self.catalog.create_table(self.path1, self.create_table(), False)
self.assertTrue(self.catalog.table_exists(self.path1))
def test_create_view(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.assertFalse(self.catalog.table_exists(self.path1))
view = self.create_view()
self.catalog.create_table(self.path1, view, False)
self.check_catalog_view_equals(view, self.catalog.get_table(self.path1))
def test_create_view_database_not_exist_exception(self):
self.assertFalse(self.catalog.database_exists(self.db1))
with self.assertRaises(DatabaseNotExistException):
self.catalog.create_table(self.non_exist_object_path, self.create_view(), False)
def test_create_view_table_already_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_view(), False)
with self.assertRaises(TableAlreadyExistException):
self.catalog.create_table(self.path1, self.create_view(), False)
def test_create_view_table_already_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
view = self.create_view()
self.catalog.create_table(self.path1, view, False)
self.check_catalog_view_equals(view, self.catalog.get_table(self.path1))
self.catalog.create_table(self.path1, self.create_another_view(), True)
self.check_catalog_view_equals(view, self.catalog.get_table(self.path1))
def test_drop_view(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_view(), False)
self.assertTrue(self.catalog.table_exists(self.path1))
self.catalog.drop_table(self.path1, False)
self.assertFalse(self.catalog.table_exists(self.path1))
def test_alter_view(self):
self.catalog.create_database(self.db1, self.create_db(), False)
view = self.create_view()
self.catalog.create_table(self.path1, view, False)
self.check_catalog_view_equals(view, self.catalog.get_table(self.path1))
new_view = self.create_another_view()
self.catalog.alter_table(self.path1, new_view, False)
self.check_catalog_view_equals(new_view, self.catalog.get_table(self.path1))
def test_alter_view_table_not_exist_exception(self):
with self.assertRaises(TableNotExistException):
self.catalog.alter_table(self.non_exist_db_path, self.create_table(), False)
def test_alter_view_table_not_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.alter_table(self.non_exist_object_path, self.create_view(), True)
self.assertFalse(self.catalog.table_exists(self.non_exist_object_path))
def test_list_view(self):
self.catalog.create_database(self.db1, self.create_db(), False)
        self.assertEqual(0, len(self.catalog.list_tables(self.db1)))
self.catalog.create_table(self.path1, self.create_view(), False)
self.catalog.create_table(self.path3, self.create_table(), False)
self.assertEqual(2, len(self.catalog.list_tables(self.db1)))
self.assertEqual({self.path1.get_object_name(), self.path3.get_object_name()},
set(self.catalog.list_tables(self.db1)))
self.assertEqual([self.path1.get_object_name()], self.catalog.list_views(self.db1))
def test_rename_view(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_view(), False)
self.assertTrue(self.catalog.table_exists(self.path1))
self.catalog.rename_table(self.path1, self.t2, False)
self.assertFalse(self.catalog.table_exists(self.path1))
self.assertTrue(self.catalog.table_exists(self.path3))
def test_create_function(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.assertFalse(self.catalog.function_exists(self.path1))
self.catalog.create_function(self.path1, self.create_function(), False)
self.assertTrue(self.catalog.function_exists(self.path1))
def test_create_function_database_not_exist_exception(self):
self.assertFalse(self.catalog.database_exists(self.db1))
with self.assertRaises(DatabaseNotExistException):
self.catalog.create_function(self.path1, self.create_function(), False)
    def test_create_function_function_already_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_function(self.path1, self.create_function(), False)
self.assertTrue(self.catalog.function_exists(self.path1))
# test 'ignoreIfExist' flag
self.catalog.create_function(self.path1, self.create_another_function(), True)
with self.assertRaises(FunctionAlreadyExistException):
self.catalog.create_function(self.path1, self.create_function(), False)
def test_alter_function(self):
self.catalog.create_database(self.db1, self.create_db(), False)
func = self.create_function()
self.catalog.create_function(self.path1, func, False)
self.check_catalog_function_equals(func, self.catalog.get_function(self.path1))
new_func = self.create_another_function()
self.catalog.alter_function(self.path1, new_func, False)
actual = self.catalog.get_function(self.path1)
        self.assertNotEqual(func.get_class_name(), actual.get_class_name())
self.check_catalog_function_equals(new_func, actual)
def test_alter_function_function_not_exist_exception(self):
with self.assertRaises(FunctionNotExistException):
self.catalog.alter_function(self.non_exist_object_path, self.create_function(), False)
def test_alter_function_function_not_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.alter_function(self.non_exist_object_path, self.create_function(), True)
self.assertFalse(self.catalog.function_exists(self.non_exist_object_path))
def test_list_functions(self):
self.catalog.create_database(self.db1, self.create_db(), False)
func = self.create_function()
self.catalog.create_function(self.path1, func, False)
self.assertEqual(self.path1.get_object_name(), self.catalog.list_functions(self.db1)[0])
def test_list_functions_database_not_exist_exception(self):
with self.assertRaises(DatabaseNotExistException):
self.catalog.list_functions(self.db1)
def test_get_function_function_not_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
with self.assertRaises(FunctionNotExistException):
self.catalog.get_function(self.non_exist_object_path)
def test_get_function_function_not_exist_exception_no_db(self):
with self.assertRaises(FunctionNotExistException):
self.catalog.get_function(self.non_exist_object_path)
def test_drop_function(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_function(self.path1, self.create_function(), False)
self.assertTrue(self.catalog.function_exists(self.path1))
self.catalog.drop_function(self.path1, False)
self.assertFalse(self.catalog.function_exists(self.path1))
def test_drop_function_function_not_exist_exception(self):
with self.assertRaises(FunctionNotExistException):
self.catalog.drop_function(self.non_exist_db_path, False)
def test_drop_function_function_not_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.drop_function(self.non_exist_object_path, True)
self.catalog.drop_database(self.db1, False)
def test_create_partition(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
        self.assertEqual(0, len(self.catalog.list_partitions(self.path1)))
self.catalog.create_partition(self.path1, self.create_partition_spec(),
self.create_partition(), False)
self.check_catalog_partition_equals(self.create_partition(),
self.catalog.get_partition(
self.path1, self.create_partition_spec()))
self.catalog.create_partition(
self.path1, self.create_another_partition_spec(), self.create_partition(), False)
self.check_catalog_partition_equals(self.create_partition(),
self.catalog.get_partition(
self.path1, self.create_another_partition_spec()))
def test_create_partition_table_not_exist_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
with self.assertRaises(TableNotExistException):
self.catalog.create_partition(self.path1, self.create_partition_spec(),
self.create_partition(), False)
    def test_create_partition_table_not_partitioned_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_table(), False)
with self.assertRaises(TableNotPartitionedException):
self.catalog.create_partition(self.path1, self.create_partition_spec(),
self.create_partition(), False)
def test_create_partition_partition_spec_invalid_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_partitioned_table()
self.catalog.create_table(self.path1, table, False)
partition_spec = self.create_invalid_partition_spec_subset()
with self.assertRaises(PartitionSpecInvalidException):
self.catalog.create_partition(
self.path1, partition_spec, self.create_partition(), False)
def test_create_partition_partition_already_exists_exception(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
partition = self.create_partition()
self.catalog.create_partition(self.path1, self.create_partition_spec(), partition, False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionAlreadyExistsException):
self.catalog.create_partition(
self.path1, partition_spec, self.create_partition(), False)
def test_create_partition_partition_already_exists_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
partition_spec = self.create_partition_spec()
self.catalog.create_partition(self.path1, partition_spec, self.create_partition(), False)
self.catalog.create_partition(self.path1, partition_spec, self.create_partition(), True)
def test_drop_partition(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
self.catalog.create_partition(self.path1, self.create_partition_spec(),
self.create_partition(), False)
self.catalog.drop_partition(self.path1, self.create_partition_spec(), False)
self.assertEqual([], self.catalog.list_partitions(self.path1))
def test_drop_partition_table_not_exist(self):
self.catalog.create_database(self.db1, self.create_db(), False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.drop_partition(self.path1, partition_spec, False)
def test_drop_partition_table_not_partitioned(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_table(), False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.drop_partition(self.path1, partition_spec, False)
def test_drop_partition_partition_spec_invalid(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_partitioned_table()
self.catalog.create_table(self.path1, table, False)
partition_spec = self.create_invalid_partition_spec_subset()
with self.assertRaises(PartitionNotExistException):
self.catalog.drop_partition(self.path1, partition_spec, False)
    def test_drop_partition_partition_not_exist(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.drop_partition(self.path1, partition_spec, False)
    def test_drop_partition_partition_not_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
self.catalog.drop_partition(self.path1, self.create_partition_spec(), True)
def test_alter_partition(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
self.catalog.create_partition(self.path1, self.create_partition_spec(),
self.create_partition(), False)
cp = self.catalog.get_partition(self.path1, self.create_partition_spec())
self.check_catalog_partition_equals(self.create_partition(), cp)
self.assertIsNone(cp.get_properties().get("k"))
another = CatalogPartition.create_instance(
{"is_streaming": "false", "k": "v"}, "catalog partition")
self.catalog.alter_partition(self.path1, self.create_partition_spec(), another, False)
cp = self.catalog.get_partition(self.path1, self.create_partition_spec())
self.check_catalog_partition_equals(another, cp)
self.assertEqual("v", cp.get_properties().get("k"))
def test_alter_partition_table_not_exist(self):
self.catalog.create_database(self.db1, self.create_db(), False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.alter_partition(self.path1, partition_spec, self.create_partition(), False)
def test_alter_partition_table_not_partitioned(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_table(), False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.alter_partition(self.path1, partition_spec, self.create_partition(), False)
def test_alter_partition_partition_spec_invalid(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_partitioned_table()
self.catalog.create_table(self.path1, table, False)
partition_spec = self.create_invalid_partition_spec_subset()
with self.assertRaises(PartitionNotExistException):
self.catalog.alter_partition(self.path1, partition_spec, self.create_partition(), False)
def test_alter_partition_partition_not_exist(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.alter_partition(self.path1, partition_spec, self.create_partition(), False)
def test_alter_partition_partition_not_exist_ignored(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
self.catalog.alter_partition(self.path1, self.create_partition_spec(),
self.create_partition(), True)
def test_get_partition_table_not_exists(self):
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.get_partition(self.path1, partition_spec)
def test_get_partition_table_not_partitioned(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_table(), False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.get_partition(self.path1, partition_spec)
def test_get_partition_partition_spec_invalid_invalid_partition_spec(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_partitioned_table()
self.catalog.create_table(self.path1, table, False)
partition_spec = self.create_invalid_partition_spec_subset()
with self.assertRaises(PartitionNotExistException):
self.catalog.get_partition(self.path1, partition_spec)
def test_get_partition_partition_spec_invalid_size_not_equal(self):
self.catalog.create_database(self.db1, self.create_db(), False)
table = self.create_partitioned_table()
self.catalog.create_table(self.path1, table, False)
partition_spec = self.create_partition_spec_subset()
with self.assertRaises(PartitionNotExistException):
self.catalog.get_partition(self.path1, partition_spec)
def test_get_partition_partition_not_exist(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
partition_spec = self.create_partition_spec()
with self.assertRaises(PartitionNotExistException):
self.catalog.get_partition(self.path1, partition_spec)
def test_partition_exists(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
self.catalog.create_partition(self.path1, self.create_partition_spec(),
self.create_partition(), False)
self.assertTrue(self.catalog.partition_exists(self.path1, self.create_partition_spec()))
self.assertFalse(self.catalog.partition_exists(self.path2, self.create_partition_spec()))
self.assertFalse(self.catalog.partition_exists(ObjectPath.from_string("non.exist"),
self.create_partition_spec()))
def test_list_partition_partial_spec(self):
self.catalog.create_database(self.db1, self.create_db(), False)
self.catalog.create_table(self.path1, self.create_partitioned_table(), False)
self.catalog.create_partition(self.path1, self.create_partition_spec(),
self.create_partition(), False)
self.catalog.create_partition(self.path1, self.create_another_partition_spec(),
self.create_partition(), False)
self.assertEqual(2,
len(self.catalog.list_partitions(
self.path1, self.create_partition_spec_subset())))
self.assertEqual(1,
len(self.catalog.list_partitions(
self.path1, self.create_another_partition_spec_subset())))
| 39,321 | 42.788419 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/util/exceptions.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import py4j
from py4j.protocol import Py4JJavaError
class JavaException(Exception):
def __init__(self, stack_trace: str):
self.stack_trace = stack_trace
def __str__(self):
return self.stack_trace
class TableException(JavaException):
"""
General Exception for all errors during table handling.
"""
class CatalogException(JavaException):
"""
A catalog-related exception.
"""
class DatabaseAlreadyExistException(JavaException):
"""
Exception for trying to create a database that already exists.
"""
class DatabaseNotEmptyException(JavaException):
"""
    Exception for trying to drop a database that is not empty.
"""
class DatabaseNotExistException(JavaException):
"""
Exception for trying to operate on a database that doesn't exist.
"""
class FunctionAlreadyExistException(JavaException):
"""
Exception for trying to create a function that already exists.
"""
class FunctionNotExistException(JavaException):
"""
Exception for trying to operate on a function that doesn't exist.
"""
class PartitionAlreadyExistsException(JavaException):
"""
Exception for trying to create a partition that already exists.
"""
class PartitionNotExistException(JavaException):
"""
    Exception for trying to operate on a partition that doesn't exist. Possible causes include a
    non-existent table, a non-partitioned table, an invalid partition spec, etc.
"""
class PartitionSpecInvalidException(JavaException):
"""
    Exception for a PartitionSpec that is invalid with respect to the partition key list of a
    partitioned table. For example, it is thrown when the size of the PartitionSpec exceeds the
    size of the partition key list, or when the size of the PartitionSpec is 'n' but its keys
    don't match the first 'n' keys in the partition key list.
"""
class TableAlreadyExistException(JavaException):
"""
Exception for trying to create a table (or view) that already exists.
"""
class TableNotExistException(JavaException):
"""
Exception for trying to operate on a table (or view) that doesn't exist.
"""
class TableNotPartitionedException(JavaException):
"""
Exception for trying to operate partition on a non-partitioned table.
"""
# Mapping from JavaException to PythonException
exception_mapping = {
"org.apache.flink.table.api.TableException":
TableException,
"org.apache.flink.table.catalog.exceptions.CatalogException":
CatalogException,
"org.apache.flink.table.catalog.exceptions.DatabaseAlreadyExistException":
DatabaseAlreadyExistException,
"org.apache.flink.table.catalog.exceptions.DatabaseNotEmptyException":
DatabaseNotEmptyException,
"org.apache.flink.table.catalog.exceptions.DatabaseNotExistException":
DatabaseNotExistException,
"org.apache.flink.table.catalog.exceptions.FunctionAlreadyExistException":
FunctionAlreadyExistException,
"org.apache.flink.table.catalog.exceptions.FunctionNotExistException":
FunctionNotExistException,
"org.apache.flink.table.catalog.exceptions.PartitionAlreadyExistsException":
PartitionAlreadyExistsException,
"org.apache.flink.table.catalog.exceptions.PartitionNotExistException":
PartitionNotExistException,
"org.apache.flink.table.catalog.exceptions.PartitionSpecInvalidException":
PartitionSpecInvalidException,
"org.apache.flink.table.catalog.exceptions.TableAlreadyExistException":
TableAlreadyExistException,
"org.apache.flink.table.catalog.exceptions.TableNotExistException":
TableNotExistException,
"org.apache.flink.table.catalog.exceptions.TableNotPartitionedException":
TableNotPartitionedException,
}
def capture_java_exception(f):
def deco(*a, **kw):
try:
return f(*a, **kw)
except Py4JJavaError as e:
from pyflink.java_gateway import get_gateway
get_gateway().jvm.org.apache.flink.client.python.PythonEnvUtils\
.setPythonException(e.java_exception)
s = e.java_exception.toString()
for exception in exception_mapping.keys():
if s.startswith(exception):
java_exception = convert_py4j_exception(e)
break
else:
raise
raise java_exception
return deco
def install_exception_handler():
"""
    Hook an exception handler into Py4J so that certain Java exceptions can be captured.
    When calling a Java API, Py4J calls `get_return_value` to parse the returned object.
    If an exception was raised in the JVM, the result is a Java exception object and a
    py4j.protocol.Py4JJavaError is raised. We replace the original `get_return_value` with
    one that captures the Java exception and raises a Python one (with the same error message).
    It is idempotent and can be called multiple times.
"""
original = py4j.protocol.get_return_value
    # Wrap the original (unpatched) `get_return_value`, which keeps repeated installs idempotent.
patched = capture_java_exception(original)
# only patch the one used in py4j.java_gateway (call Java API)
py4j.java_gateway.get_return_value = patched
def install_py4j_hooks():
"""
    Hook Py4J classes such as JavaPackage to improve the exception message.
"""
def wrapped_call(self, *args, **kwargs):
raise TypeError(
"Could not found the Java class '%s'. The Java dependencies could be specified via "
"command line argument '--jarfile' or the config option 'pipeline.jars'" % self._fqn)
setattr(py4j.java_gateway.JavaPackage, '__call__', wrapped_call)
def convert_py4j_exception(e: Py4JJavaError) -> JavaException:
"""
Convert Py4J exception to JavaException.
"""
s = e.java_exception.toString()
for exception in exception_mapping.keys():
if s.startswith(exception):
return exception_mapping[exception](str(e).split(': ', 1)[1])
else:
return JavaException(str(e).split(': ', 1)[1])
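# A minimal illustration (not part of the original module) of how the prefix matching above
# resolves a Java exception string to its Python counterpart. The helper name and the sample
# string in the comment below are made up for demonstration purposes only.
def _example_lookup_python_exception(java_exception_string: str):
    """Return the mapped Python exception class, falling back to the generic JavaException."""
    for prefix, python_exception_class in exception_mapping.items():
        if java_exception_string.startswith(prefix):
            return python_exception_class
    return JavaException
# e.g. _example_lookup_python_exception(
#     "org.apache.flink.table.catalog.exceptions.TableNotExistException: ...")
# returns TableNotExistException.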
| 7,051 | 33.910891 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/util/java_utils.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from datetime import timedelta
from py4j.java_gateway import JavaClass, get_java_class, JavaObject
from py4j.protocol import Py4JJavaError
from pyflink.java_gateway import get_gateway
def to_jarray(j_type, arr):
"""
    Convert a Python list to a Java array.
    :param j_type: the Java type of the array elements
    :param arr: the Python list to convert
"""
gateway = get_gateway()
j_arr = gateway.new_array(j_type, len(arr))
for i in range(0, len(arr)):
j_arr[i] = arr[i]
return j_arr
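# A minimal usage sketch (not part of the original module): converting a Python list of strings
# into a Java String[]. It assumes a running Py4J gateway, so the helper is only defined here for
# illustration and is never called at import time.
def _example_to_jarray():
    gateway = get_gateway()
    return to_jarray(gateway.jvm.java.lang.String, ["a", "b", "c"])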
def to_j_flink_time(time_delta):
gateway = get_gateway()
TimeUnit = gateway.jvm.java.util.concurrent.TimeUnit
Time = gateway.jvm.org.apache.flink.api.common.time.Time
if isinstance(time_delta, timedelta):
total_microseconds = round(time_delta.total_seconds() * 1000 * 1000)
return Time.of(total_microseconds, TimeUnit.MICROSECONDS)
else:
# time delta in milliseconds
total_milliseconds = time_delta
return Time.milliseconds(total_milliseconds)
def from_j_flink_time(j_flink_time):
total_milliseconds = j_flink_time.toMilliseconds()
return timedelta(milliseconds=total_milliseconds)
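# A small sketch (not part of the original module) of the expected round trip between a Python
# timedelta and the Java Time representation handled above. It assumes a running Py4J gateway and
# is never called at import time.
def _example_flink_time_round_trip():
    original = timedelta(minutes=1, milliseconds=500)
    j_time = to_j_flink_time(original)    # Java Time of 60,500,000 microseconds
    return from_j_flink_time(j_time)      # timedelta(milliseconds=60500), equal to the original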
def load_java_class(class_name):
gateway = get_gateway()
context_classloader = gateway.jvm.Thread.currentThread().getContextClassLoader()
return context_classloader.loadClass(class_name)
def is_instance_of(java_object, java_class):
gateway = get_gateway()
if isinstance(java_class, str):
param = java_class
elif isinstance(java_class, JavaClass):
param = get_java_class(java_class)
elif isinstance(java_class, JavaObject):
if not is_instance_of(java_class, gateway.jvm.Class):
param = java_class.getClass()
else:
param = java_class
else:
raise TypeError(
"java_class must be a string, a JavaClass, or a JavaObject")
return gateway.jvm.org.apache.flink.api.python.shaded.py4j.reflection.TypeUtil.isInstanceOf(
param, java_object)
def get_j_env_configuration(j_env):
env_clazz = load_java_class(
"org.apache.flink.streaming.api.environment.StreamExecutionEnvironment")
field = env_clazz.getDeclaredField("configuration")
field.setAccessible(True)
return field.get(j_env)
def get_field_value(java_obj, field_name):
field = get_field(java_obj.getClass(), field_name)
return field.get(java_obj)
def get_field(cls, field_name):
try:
field = cls.getDeclaredField(field_name)
field.setAccessible(True)
return field
except Py4JJavaError:
while cls.getSuperclass() is not None:
cls = cls.getSuperclass()
try:
field = cls.getDeclaredField(field_name)
field.setAccessible(True)
return field
except Py4JJavaError:
pass
def invoke_method(obj, object_type, method_name, args=None, arg_types=None):
env_clazz = load_java_class(object_type)
method = env_clazz.getDeclaredMethod(
method_name,
to_jarray(
get_gateway().jvm.Class,
[load_java_class(arg_type) for arg_type in arg_types or []]))
method.setAccessible(True)
return method.invoke(obj, to_jarray(get_gateway().jvm.Object, args or []))
def is_local_deployment(j_configuration):
jvm = get_gateway().jvm
JDeploymentOptions = jvm.org.apache.flink.configuration.DeploymentOptions
return j_configuration.containsKey(JDeploymentOptions.TARGET.key()) \
and j_configuration.getString(JDeploymentOptions.TARGET.key(), None) in \
("local", "minicluster")
def add_jars_to_context_class_loader(jar_urls):
"""
    Add jars to the Python gateway server for local compilation and local execution
    (i.e. minicluster).
    Many components in Flink are not added to the classpath by default, e.g. the Kafka
    connector, the JDBC connector, the CSV format, etc. This utility function can be used to
    hot-load such jars.
:param jar_urls: The list of jar urls.
"""
gateway = get_gateway()
# validate and normalize
jar_urls = [gateway.jvm.java.net.URL(url) for url in jar_urls]
context_classloader = gateway.jvm.Thread.currentThread().getContextClassLoader()
existing_urls = []
class_loader_name = context_classloader.getClass().getName()
if class_loader_name == "java.net.URLClassLoader":
existing_urls = set([url.toString() for url in context_classloader.getURLs()])
if all([url.toString() in existing_urls for url in jar_urls]):
# if urls all existed, no need to create new class loader.
return
URLClassLoaderClass = load_java_class("java.net.URLClassLoader")
if is_instance_of(context_classloader, URLClassLoaderClass):
if class_loader_name == "org.apache.flink.runtime.execution.librarycache." \
"FlinkUserCodeClassLoaders$SafetyNetWrapperClassLoader":
ensureInner = context_classloader.getClass().getDeclaredMethod("ensureInner", None)
ensureInner.setAccessible(True)
context_classloader = ensureInner.invoke(context_classloader, None)
addURL = URLClassLoaderClass.getDeclaredMethod(
"addURL",
to_jarray(
gateway.jvm.Class,
[load_java_class("java.net.URL")]))
addURL.setAccessible(True)
for url in jar_urls:
addURL.invoke(context_classloader, to_jarray(get_gateway().jvm.Object, [url]))
else:
context_classloader = create_url_class_loader(jar_urls, context_classloader)
gateway.jvm.Thread.currentThread().setContextClassLoader(context_classloader)
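# A usage sketch (not part of the original module). The jar URL below is purely hypothetical; in
# practice it must point to an existing jar, e.g. a connector needed for local execution.
def _example_hot_load_connector_jar():
    add_jars_to_context_class_loader(
        ["file:///path/to/flink-sql-connector-kafka.jar"])  # hypothetical path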
def to_j_explain_detail_arr(p_extra_details):
    # Sphinx checks for import loops when generating the docs;
    # use a local import to avoid that error.
from pyflink.table.explain_detail import ExplainDetail
gateway = get_gateway()
def to_j_explain_detail(p_extra_detail):
if p_extra_detail == ExplainDetail.JSON_EXECUTION_PLAN:
return gateway.jvm.org.apache.flink.table.api.ExplainDetail.JSON_EXECUTION_PLAN
elif p_extra_detail == ExplainDetail.CHANGELOG_MODE:
return gateway.jvm.org.apache.flink.table.api.ExplainDetail.CHANGELOG_MODE
elif p_extra_detail == ExplainDetail.ESTIMATED_COST:
return gateway.jvm.org.apache.flink.table.api.ExplainDetail.ESTIMATED_COST
else:
return gateway.jvm.org.apache.flink.table.api.ExplainDetail.PLAN_ADVICE
_len = len(p_extra_details) if p_extra_details else 0
j_arr = gateway.new_array(gateway.jvm.org.apache.flink.table.api.ExplainDetail, _len)
for i in range(0, _len):
j_arr[i] = to_j_explain_detail(p_extra_details[i])
return j_arr
def create_url_class_loader(urls, parent_class_loader):
gateway = get_gateway()
url_class_loader = gateway.jvm.java.net.URLClassLoader(
to_jarray(gateway.jvm.java.net.URL, urls), parent_class_loader)
return url_class_loader
| 7,989 | 37.97561 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/util/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/metrics/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.metrics.metricbase import MetricGroup, Metric, Counter, Meter, Distribution
__all__ = ["MetricGroup", "Metric", "Counter", "Meter", "Distribution"]
| 1,121 | 50 | 88 |
py
|
flink
|
flink-master/flink-python/pyflink/metrics/metricbase.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import Callable
class MetricGroup(ABC):
"""
A MetricGroup is a named container for metrics and further metric subgroups.
Instances of this class can be used to register new metrics with Flink and to create a nested
hierarchy based on the group names.
    A MetricGroup is uniquely identified by its place in the hierarchy and its name.
.. versionadded:: 1.11.0
"""
@abstractmethod
def add_group(self, name: str, extra: str = None) -> 'MetricGroup':
"""
        Creates a new MetricGroup and adds it to this group's sub-groups.
If extra is not None, creates a new key-value MetricGroup pair.
The key group is added to this group's sub-groups, while the value
group is added to the key group's sub-groups. In this case,
the value group will be returned and a user variable will be defined.
.. versionadded:: 1.11.0
"""
pass
@abstractmethod
def counter(self, name: str) -> 'Counter':
"""
Registers a new `Counter` with Flink.
.. versionadded:: 1.11.0
"""
pass
@abstractmethod
def gauge(self, name: str, obj: Callable[[], int]) -> None:
"""
Registers a new `Gauge` with Flink.
.. versionadded:: 1.11.0
"""
pass
@abstractmethod
def meter(self, name: str, time_span_in_seconds: int = 60) -> 'Meter':
"""
Registers a new `Meter` with Flink.
.. versionadded:: 1.11.0
"""
pass
@abstractmethod
def distribution(self, name: str) -> 'Distribution':
"""
Registers a new `Distribution` with Flink.
.. versionadded:: 1.11.0
"""
pass
class Metric(ABC):
"""
Base interface of a metric object.
.. versionadded:: 1.11.0
"""
pass
class Counter(Metric, ABC):
"""
Counter metric interface. Allows a count to be incremented/decremented
during pipeline execution.
.. versionadded:: 1.11.0
"""
@abstractmethod
def inc(self, n: int = 1):
"""
Increment the current count by the given value.
.. versionadded:: 1.11.0
"""
pass
@abstractmethod
def dec(self, n: int = 1):
"""
        Decrement the current count by the given value.
.. versionadded:: 1.11.0
"""
pass
@abstractmethod
def get_count(self) -> int:
"""
Returns the current count.
.. versionadded:: 1.11.0
"""
pass
class Meter(Metric, ABC):
"""
Meter Metric interface.
Metric for measuring throughput.
.. versionadded:: 1.11.0
"""
@abstractmethod
def mark_event(self, value: int = 1):
"""
Mark occurrence of the specified number of events.
.. versionadded:: 1.11.0
"""
pass
@abstractmethod
def get_count(self) -> int:
"""
Get number of events marked on the meter.
.. versionadded:: 1.11.0
"""
pass
class Distribution(Metric, ABC):
"""
Distribution Metric interface.
Allows statistics about the distribution of a variable to be collected during
pipeline execution.
.. versionadded:: 1.11.0
"""
@abstractmethod
def update(self, value):
pass
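# A hedged usage sketch (not part of the original module): how these abstract metric types are
# typically obtained and updated from user code. It assumes a context object exposing
# get_metric_group(), as passed to user-defined functions elsewhere in PyFlink; the group and
# metric names are made up.
def _example_register_metrics(function_context) -> None:
    group = function_context.get_metric_group().add_group("my_group")
    counter = group.counter("my_counter")
    meter = group.meter("my_meter", time_span_in_seconds=120)
    histogram = group.distribution("my_distribution")
    counter.inc()            # count one event
    meter.mark_event(3)      # mark three events for throughput measurement
    histogram.update(42)     # record a sample value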
| 4,317 | 24.251462 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/internal_state.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import Generic, TypeVar, List, Iterable, Collection
from pyflink.datastream.state import State, ValueState, AppendingState, MergingState, ListState, \
AggregatingState, ReducingState, MapState, ReadOnlyBroadcastState, BroadcastState
N = TypeVar('N')
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
IN = TypeVar('IN')
OUT = TypeVar('OUT')
class InternalKvState(State, Generic[N]):
"""
    The :class:`InternalKvState` is the root of the internal state type hierarchy, similar to
    :class:`State` being the root of the public API state hierarchy.
    The internal state classes give access to the namespace getters and setters and to
    additional functionality, like raw value access or state merging.
    The public API state hierarchy is intended to be programmed against by Flink applications.
    The internal state hierarchy holds all the auxiliary methods that are used by the runtime
    and are not intended to be used by user applications. These internal methods are of limited
    use to users, would only be confusing, and are usually not regarded as stable across
    releases.
"""
@abstractmethod
def set_current_namespace(self, namespace: N) -> None:
"""
Sets the current namespace, which will be used when using the state access methods.
:param namespace: The namespace.
"""
pass
class InternalValueState(InternalKvState[N], ValueState[T], ABC):
"""
    The peer to the :class:`ValueState` in the internal state type hierarchy.
"""
pass
class InternalAppendingState(InternalKvState[N], AppendingState[IN, OUT], ABC):
"""
    The peer to the :class:`AppendingState` in the internal state type hierarchy.
"""
pass
class InternalMergingState(InternalAppendingState[N, IN, OUT], MergingState[IN, OUT]):
"""
    The peer to the :class:`MergingState` in the internal state type hierarchy.
"""
@abstractmethod
def merge_namespaces(self, target: N, sources: Collection[N]) -> None:
"""
Merges the state of the current key for the given source namespaces into the state of the
target namespace.
:param target: The target namespace where the merged state should be stored.
:param sources: The source namespaces whose state should be merged.
"""
pass
class InternalListState(InternalMergingState[N, List[T], Iterable[T]], ListState[T], ABC):
"""
    The peer to the :class:`ListState` in the internal state type hierarchy.
"""
pass
class InternalAggregatingState(InternalMergingState[N, IN, OUT], AggregatingState[IN, OUT], ABC):
"""
    The peer to the :class:`AggregatingState` in the internal state type hierarchy.
"""
pass
class InternalReducingState(InternalMergingState[N, T, T], ReducingState[T], ABC):
"""
    The peer to the :class:`ReducingState` in the internal state type hierarchy.
"""
pass
class InternalMapState(InternalKvState[N], MapState[K, V], ABC):
"""
    The peer to the :class:`MapState` in the internal state type hierarchy.
"""
pass
class InternalReadOnlyBroadcastState(ReadOnlyBroadcastState[K, V], ABC):
"""
The peer to :class:`ReadOnlyBroadcastState`.
"""
pass
class InternalBroadcastState(InternalReadOnlyBroadcastState[K, V], BroadcastState[K, V], ABC):
"""
The peer to :class:`BroadcastState`.
"""
@abstractmethod
def to_read_only_broadcast_state(self) -> InternalReadOnlyBroadcastState[K, V]:
"""
Convert to :class:`ReadOnlyBroadcastState` interface with the same underlying state.
"""
pass
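# A hedged usage sketch (not part of the original module): how runtime code is expected to drive a
# namespaced value state. "state" stands for any concrete InternalValueState implementation (for
# example, the synchronous runtime states in pyflink.fn_execution.state_impl); the window namespace
# argument is a made-up placeholder.
def _example_namespaced_access(state: 'InternalValueState', window_namespace) -> None:
    state.set_current_namespace(window_namespace)  # select the per-window slice of the state
    current = state.value()
    state.update((current or 0) + 1)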
| 4,630 | 33.559701 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/pickle.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from threading import RLock
import cloudpickle
_lock = RLock()
def loads(payload):
with _lock:
        # there is a race condition when pickle.loads() is used in a multi-threaded environment,
# see https://bugs.python.org/issue36773 for more details.
return cloudpickle.loads(payload)
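# A minimal sketch (not part of the original module): the lock-protected loads() above is the
# inverse of cloudpickle.dumps, e.g. when shipping a Python callable to the worker.
def _example_pickle_round_trip():
    payload = cloudpickle.dumps(lambda x: x + 1)
    func = loads(payload)
    return func(1)  # 2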
| 1,256 | 40.9 | 85 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/state_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import base64
import collections
from abc import ABC, abstractmethod
from apache_beam.coders import coder_impl
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.runners.worker.bundle_processor import SynchronousBagRuntimeState
from apache_beam.transforms import userstate
from enum import Enum
from functools import partial
from io import BytesIO
from typing import List, Tuple, Any, Dict, Collection, cast
from pyflink.datastream import ReduceFunction
from pyflink.datastream.functions import AggregateFunction
from pyflink.datastream.state import StateTtlConfig, MapStateDescriptor, OperatorStateStore
from pyflink.fn_execution.beam.beam_coders import FlinkCoder
from pyflink.fn_execution.coders import FieldCoder, MapCoder, from_type_info
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor as pb2_StateDescriptor
from pyflink.fn_execution.internal_state import InternalKvState, N, InternalValueState, \
InternalListState, InternalReducingState, InternalMergingState, InternalAggregatingState, \
InternalMapState, InternalReadOnlyBroadcastState, InternalBroadcastState
class LRUCache(object):
"""
A simple LRUCache implementation used to manage the internal runtime state.
An internal runtime state is used to handle the data under a specific key of a "public" state.
So the number of the internal runtime states may keep growing during the streaming task
    execution. To prevent OOM errors caused by this unlimited growth, we introduce this LRUCache
    to evict inactive internal runtime states.
"""
def __init__(self, max_entries, default_entry):
self._max_entries = max_entries
self._default_entry = default_entry
self._cache = collections.OrderedDict()
self._on_evict = None
def get(self, key):
value = self._cache.pop(key, self._default_entry)
if value != self._default_entry:
# update the last access time
self._cache[key] = value
return value
def put(self, key, value):
self._cache[key] = value
while len(self._cache) > self._max_entries:
name, value = self._cache.popitem(last=False)
if self._on_evict is not None:
self._on_evict(name, value)
def evict(self, key):
value = self._cache.pop(key, self._default_entry)
if self._on_evict is not None:
self._on_evict(key, value)
def evict_all(self):
if self._on_evict is not None:
for item in self._cache.items():
self._on_evict(*item)
self._cache.clear()
def set_on_evict(self, func):
self._on_evict = func
def __len__(self):
return len(self._cache)
def __iter__(self):
return iter(self._cache.values())
def __contains__(self, key):
return key in self._cache
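# A small illustrative sketch (not part of the original module) of the LRUCache behaviour described
# above: once max_entries is exceeded, the least recently used entry is evicted and reported through
# the optional eviction callback.
def _example_lru_cache():
    evicted = []
    cache = LRUCache(max_entries=2, default_entry=None)
    cache.set_on_evict(lambda key, value: evicted.append(key))
    cache.put("a", 1)
    cache.put("b", 2)
    cache.get("a")      # refreshes "a", so "b" becomes the least recently used entry
    cache.put("c", 3)   # exceeds max_entries and evicts "b"
    return evicted      # ["b"]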
class SynchronousKvRuntimeState(InternalKvState, ABC):
"""
Base Class for partitioned State implementation.
"""
def __init__(self, name: str, remote_state_backend: 'RemoteKeyedStateBackend'):
self.name = name
self._remote_state_backend = remote_state_backend
self._internal_state = None
self.namespace = None
self._ttl_config = None
self._cache_type = SynchronousKvRuntimeState.CacheType.ENABLE_READ_WRITE_CACHE
def set_current_namespace(self, namespace: N) -> None:
if namespace == self.namespace:
return
if self.namespace is not None:
self._remote_state_backend.cache_internal_state(
self._remote_state_backend._encoded_current_key, self)
self.namespace = namespace
self._internal_state = None
def enable_time_to_live(self, ttl_config: StateTtlConfig):
self._ttl_config = ttl_config
if ttl_config.get_state_visibility() == StateTtlConfig.StateVisibility.NeverReturnExpired:
self._cache_type = SynchronousKvRuntimeState.CacheType.DISABLE_CACHE
elif ttl_config.get_update_type() == StateTtlConfig.UpdateType.OnReadAndWrite:
self._cache_type = SynchronousKvRuntimeState.CacheType.ENABLE_WRITE_CACHE
if self._cache_type != SynchronousKvRuntimeState.CacheType.ENABLE_READ_WRITE_CACHE:
# disable read cache
self._remote_state_backend._state_handler._state_cache._cache._max_entries = 0
@abstractmethod
def get_internal_state(self):
pass
class CacheType(Enum):
DISABLE_CACHE = 0
ENABLE_WRITE_CACHE = 1
ENABLE_READ_WRITE_CACHE = 2
class SynchronousBagKvRuntimeState(SynchronousKvRuntimeState, ABC):
"""
Base Class for State implementation backed by a :class:`SynchronousBagRuntimeState`.
"""
def __init__(self, name: str, value_coder, remote_state_backend: 'RemoteKeyedStateBackend'):
super(SynchronousBagKvRuntimeState, self).__init__(name, remote_state_backend)
self._value_coder = value_coder
def get_internal_state(self):
if self._internal_state is None:
self._internal_state = self._remote_state_backend._get_internal_bag_state(
self.name, self.namespace, self._value_coder, self._ttl_config)
return self._internal_state
def _maybe_clear_write_cache(self):
if self._cache_type == SynchronousKvRuntimeState.CacheType.DISABLE_CACHE or \
self._remote_state_backend._state_cache_size <= 0:
self._internal_state.commit()
self._internal_state._cleared = False
self._internal_state._added_elements = []
class SynchronousValueRuntimeState(SynchronousBagKvRuntimeState, InternalValueState):
"""
The runtime ValueState implementation backed by a :class:`SynchronousBagRuntimeState`.
"""
def __init__(self, name: str, value_coder, remote_state_backend: 'RemoteKeyedStateBackend'):
super(SynchronousValueRuntimeState, self).__init__(name, value_coder, remote_state_backend)
def value(self):
for i in self.get_internal_state().read():
return i
return None
def update(self, value) -> None:
self.get_internal_state()
self._internal_state.clear()
self._internal_state.add(value)
self._maybe_clear_write_cache()
def clear(self) -> None:
self.get_internal_state().clear()
class SynchronousMergingRuntimeState(SynchronousBagKvRuntimeState, InternalMergingState, ABC):
"""
Base Class for MergingState implementation.
"""
def __init__(self, name: str, value_coder, remote_state_backend: 'RemoteKeyedStateBackend'):
super(SynchronousMergingRuntimeState, self).__init__(
name, value_coder, remote_state_backend)
def merge_namespaces(self, target: N, sources: Collection[N]) -> None:
self._remote_state_backend.merge_namespaces(self, target, sources, self._ttl_config)
class SynchronousListRuntimeState(SynchronousMergingRuntimeState, InternalListState):
"""
The runtime ListState implementation backed by a :class:`SynchronousBagRuntimeState`.
"""
def __init__(self, name: str, value_coder, remote_state_backend: 'RemoteKeyedStateBackend'):
super(SynchronousListRuntimeState, self).__init__(name, value_coder, remote_state_backend)
def add(self, v):
self.get_internal_state().add(v)
self._maybe_clear_write_cache()
def get(self):
return self.get_internal_state().read()
def add_all(self, values):
self.get_internal_state()._added_elements.extend(values)
self._maybe_clear_write_cache()
def update(self, values):
self.clear()
self.add_all(values)
self._maybe_clear_write_cache()
def clear(self):
self.get_internal_state().clear()
class SynchronousReducingRuntimeState(SynchronousMergingRuntimeState, InternalReducingState):
"""
The runtime ReducingState implementation backed by a :class:`SynchronousBagRuntimeState`.
"""
def __init__(self,
name: str,
value_coder,
remote_state_backend: 'RemoteKeyedStateBackend',
reduce_function: ReduceFunction):
super(SynchronousReducingRuntimeState, self).__init__(
name, value_coder, remote_state_backend)
self._reduce_function = reduce_function
def add(self, v):
current_value = self.get()
if current_value is None:
self._internal_state.add(v)
else:
self._internal_state.clear()
self._internal_state.add(self._reduce_function.reduce(current_value, v))
self._maybe_clear_write_cache()
def get(self):
for i in self.get_internal_state().read():
return i
return None
def clear(self):
self.get_internal_state().clear()
class SynchronousAggregatingRuntimeState(SynchronousMergingRuntimeState, InternalAggregatingState):
"""
The runtime AggregatingState implementation backed by a :class:`SynchronousBagRuntimeState`.
"""
def __init__(self,
name: str,
value_coder,
remote_state_backend: 'RemoteKeyedStateBackend',
agg_function: AggregateFunction):
super(SynchronousAggregatingRuntimeState, self).__init__(
name, value_coder, remote_state_backend)
self._agg_function = agg_function
def add(self, v):
if v is None:
self.clear()
return
accumulator = self._get_accumulator()
if accumulator is None:
accumulator = self._agg_function.create_accumulator()
accumulator = self._agg_function.add(v, accumulator)
self._internal_state.clear()
self._internal_state.add(accumulator)
self._maybe_clear_write_cache()
def get(self):
accumulator = self._get_accumulator()
if accumulator is None:
return None
else:
return self._agg_function.get_result(accumulator)
def _get_accumulator(self):
for i in self.get_internal_state().read():
return i
return None
def clear(self):
self.get_internal_state().clear()
class CachedMapState(LRUCache):
def __init__(self, max_entries):
super(CachedMapState, self).__init__(max_entries, None)
self._all_data_cached = False
self._cached_keys = set()
def on_evict(key, value):
if value[0]:
self._cached_keys.remove(key)
self._all_data_cached = False
self.set_on_evict(on_evict)
def set_all_data_cached(self):
self._all_data_cached = True
def is_all_data_cached(self):
return self._all_data_cached
def put(self, key, exists_and_value):
if exists_and_value[0]:
self._cached_keys.add(key)
super(CachedMapState, self).put(key, exists_and_value)
def get_cached_keys(self):
return self._cached_keys
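# An illustrative sketch (not part of the original module) of the (exists, value) convention used
# by CachedMapState: a cached "miss" is stored as (False, None) so that repeated lookups of a
# deleted key avoid remote requests.
def _example_cached_map_state():
    state = CachedMapState(max_entries=16)
    state.put("k1", (True, "v1"))    # key exists remotely with value "v1"
    state.put("k2", (False, None))   # key known to be absent remotely
    # -> (True, "v1"), (False, None), {"k1"}
    return state.get("k1"), state.get("k2"), state.get_cached_keys()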
class IterateType(Enum):
ITEMS = 0
KEYS = 1
VALUES = 2
class IteratorToken(Enum):
"""
    The token indicates the status of the current underlying iterator. It can also be a UUID,
which represents an iterator on the Java side.
"""
NOT_START = 0
FINISHED = 1
def create_cache_iterator(cache_dict, iterate_type, iterated_keys=None):
if iterated_keys is None:
iterated_keys = []
if iterate_type == IterateType.KEYS:
for key, (exists, value) in cache_dict.items():
if not exists or key in iterated_keys:
continue
yield key, key
elif iterate_type == IterateType.VALUES:
for key, (exists, value) in cache_dict.items():
if not exists or key in iterated_keys:
continue
yield key, value
elif iterate_type == IterateType.ITEMS:
for key, (exists, value) in cache_dict.items():
if not exists or key in iterated_keys:
continue
yield key, (key, value)
else:
raise Exception("Unsupported iterate type: %s" % iterate_type)
class CachingMapStateHandler(object):
# GET request flags
GET_FLAG = 0
ITERATE_FLAG = 1
CHECK_EMPTY_FLAG = 2
# GET response flags
EXIST_FLAG = 0
IS_NONE_FLAG = 1
NOT_EXIST_FLAG = 2
IS_EMPTY_FLAG = 3
NOT_EMPTY_FLAG = 4
# APPEND request flags
DELETE = 0
SET_NONE = 1
SET_VALUE = 2
def __init__(self, caching_state_handler, max_cached_map_key_entries):
self._state_cache = caching_state_handler._state_cache
self._underlying = caching_state_handler._underlying
self._context = caching_state_handler._context
self._max_cached_map_key_entries = max_cached_map_key_entries
self._cached_iterator_num = 0
def _get_cache_token(self):
if not self._state_cache.is_cache_enabled():
return None
if self._context.user_state_cache_token:
return self._context.user_state_cache_token
else:
return self._context.bundle_cache_token
def blocking_get(self, state_key, map_key, map_key_encoder, map_value_decoder):
cache_token = self._get_cache_token()
if not cache_token:
# cache disabled / no cache token, request from remote directly
return self._get_raw(state_key, map_key, map_key_encoder, map_value_decoder)
# lookup cache first
cache_state_key = self._convert_to_cache_key(state_key)
cached_map_state = self._state_cache.peek((cache_state_key, cache_token))
if cached_map_state is None:
# request from remote
exists, value = self._get_raw(state_key, map_key, map_key_encoder, map_value_decoder)
cached_map_state = CachedMapState(self._max_cached_map_key_entries)
cached_map_state.put(map_key, (exists, value))
self._state_cache.put((cache_state_key, cache_token), cached_map_state)
return exists, value
else:
cached_value = cached_map_state.get(map_key)
if cached_value is None:
if cached_map_state.is_all_data_cached():
return False, None
# request from remote
exists, value = self._get_raw(
state_key, map_key, map_key_encoder, map_value_decoder)
cached_map_state.put(map_key, (exists, value))
return exists, value
else:
return cached_value
def lazy_iterator(self, state_key, iterate_type, map_key_decoder, map_value_decoder,
iterated_keys):
cache_token = self._get_cache_token()
if cache_token:
# check if the data in the read cache can be used
cache_state_key = self._convert_to_cache_key(state_key)
cached_map_state = self._state_cache.peek((cache_state_key, cache_token))
if cached_map_state and cached_map_state.is_all_data_cached():
return create_cache_iterator(
cached_map_state._cache, iterate_type, iterated_keys)
# request from remote
last_iterator_token = IteratorToken.NOT_START
current_batch, iterator_token = self._iterate_raw(
state_key, iterate_type,
last_iterator_token,
map_key_decoder,
map_value_decoder)
if cache_token and \
iterator_token == IteratorToken.FINISHED and \
iterate_type != IterateType.KEYS and \
self._max_cached_map_key_entries >= len(current_batch):
# Special case: all the data of the map state is contained in current batch,
# and can be stored in the cached map state.
cached_map_state = CachedMapState(self._max_cached_map_key_entries)
cache_state_key = self._convert_to_cache_key(state_key)
for key, value in current_batch.items():
cached_map_state.put(key, (True, value))
cached_map_state.set_all_data_cached()
self._state_cache.put((cache_state_key, cache_token), cached_map_state)
return self._lazy_remote_iterator(
state_key,
iterate_type,
map_key_decoder,
map_value_decoder,
iterated_keys,
iterator_token,
current_batch)
def _lazy_remote_iterator(
self,
state_key,
iterate_type,
map_key_decoder,
map_value_decoder,
iterated_keys,
iterator_token,
current_batch):
if iterate_type == IterateType.KEYS:
while True:
for key in current_batch:
if key in iterated_keys:
continue
yield key, key
if iterator_token == IteratorToken.FINISHED:
break
current_batch, iterator_token = self._iterate_raw(
state_key,
iterate_type,
iterator_token,
map_key_decoder,
map_value_decoder)
elif iterate_type == IterateType.VALUES:
while True:
for key, value in current_batch.items():
if key in iterated_keys:
continue
yield key, value
if iterator_token == IteratorToken.FINISHED:
break
current_batch, iterator_token = self._iterate_raw(
state_key,
iterate_type,
iterator_token,
map_key_decoder,
map_value_decoder)
elif iterate_type == IterateType.ITEMS:
while True:
for key, value in current_batch.items():
if key in iterated_keys:
continue
yield key, (key, value)
if iterator_token == IteratorToken.FINISHED:
break
current_batch, iterator_token = self._iterate_raw(
state_key,
iterate_type,
iterator_token,
map_key_decoder,
map_value_decoder)
else:
raise Exception("Unsupported iterate type: %s" % iterate_type)
def extend(self, state_key, items: List[Tuple[int, Any, Any]],
map_key_encoder, map_value_encoder):
cache_token = self._get_cache_token()
if cache_token:
# Cache lookup
cache_state_key = self._convert_to_cache_key(state_key)
cached_map_state = self._state_cache.peek((cache_state_key, cache_token))
if cached_map_state is None:
cached_map_state = CachedMapState(self._max_cached_map_key_entries)
self._state_cache.put((cache_state_key, cache_token), cached_map_state)
for request_flag, map_key, map_value in items:
if request_flag == self.DELETE:
cached_map_state.put(map_key, (False, None))
elif request_flag == self.SET_NONE:
cached_map_state.put(map_key, (True, None))
elif request_flag == self.SET_VALUE:
cached_map_state.put(map_key, (True, map_value))
else:
raise Exception("Unknown flag: " + str(request_flag))
return self._append_raw(
state_key,
items,
map_key_encoder,
map_value_encoder)
def check_empty(self, state_key):
cache_token = self._get_cache_token()
if cache_token:
# Cache lookup
cache_state_key = self._convert_to_cache_key(state_key)
cached_map_state = self._state_cache.peek((cache_state_key, cache_token))
if cached_map_state is not None:
if cached_map_state.is_all_data_cached() and \
len(cached_map_state.get_cached_keys()) == 0:
return True
elif len(cached_map_state.get_cached_keys()) > 0:
return False
return self._check_empty_raw(state_key)
def clear(self, state_key):
self.clear_read_cache(state_key)
return self._underlying.clear(state_key)
def clear_read_cache(self, state_key):
cache_token = self._get_cache_token()
if cache_token:
cache_key = self._convert_to_cache_key(state_key)
self._state_cache.invalidate((cache_key, cache_token))
def get_cached_iterators_num(self):
return self._cached_iterator_num
def _inc_cached_iterators_num(self):
self._cached_iterator_num += 1
def _dec_cached_iterators_num(self):
self._cached_iterator_num -= 1
def reset_cached_iterators_num(self):
self._cached_iterator_num = 0
def _check_empty_raw(self, state_key):
output_stream = coder_impl.create_OutputStream()
output_stream.write_byte(self.CHECK_EMPTY_FLAG)
continuation_token = output_stream.get()
data, response_token = self._underlying.get_raw(state_key, continuation_token)
if data[0] == self.IS_EMPTY_FLAG:
return True
elif data[0] == self.NOT_EMPTY_FLAG:
return False
else:
raise Exception("Unknown response flag: " + str(data[0]))
def _get_raw(self, state_key, map_key, map_key_encoder, map_value_decoder):
output_stream = coder_impl.create_OutputStream()
output_stream.write_byte(self.GET_FLAG)
map_key_encoder(map_key, output_stream)
continuation_token = output_stream.get()
data, response_token = self._underlying.get_raw(state_key, continuation_token)
input_stream = coder_impl.create_InputStream(data)
result_flag = input_stream.read_byte()
if result_flag == self.EXIST_FLAG:
return True, map_value_decoder(input_stream)
elif result_flag == self.IS_NONE_FLAG:
return True, None
elif result_flag == self.NOT_EXIST_FLAG:
return False, None
else:
raise Exception("Unknown response flag: " + str(result_flag))
def _iterate_raw(self, state_key, iterate_type, iterator_token,
map_key_decoder, map_value_decoder):
output_stream = coder_impl.create_OutputStream()
output_stream.write_byte(self.ITERATE_FLAG)
output_stream.write_byte(iterate_type.value)
if not isinstance(iterator_token, IteratorToken):
# The iterator token represents a Java iterator
output_stream.write_bigendian_int32(len(iterator_token))
output_stream.write(iterator_token)
else:
output_stream.write_bigendian_int32(0)
continuation_token = output_stream.get()
data, response_token = self._underlying.get_raw(state_key, continuation_token)
if len(response_token) != 0:
            # The new iterator token is a UUID which represents a cached iterator on the
            # Java side.
new_iterator_token = response_token
if iterator_token == IteratorToken.NOT_START:
                # This is the first request but not the last request for the current state.
                # It means a new iterator has been created and cached on the Java side.
self._inc_cached_iterators_num()
else:
new_iterator_token = IteratorToken.FINISHED
if iterator_token != IteratorToken.NOT_START:
                # This is not the first request, but it is the last request for the current state.
                # It means the cached iterator created on the Java side has been removed as the
                # current iteration has finished.
self._dec_cached_iterators_num()
input_stream = coder_impl.create_InputStream(data)
if iterate_type == IterateType.ITEMS or iterate_type == IterateType.VALUES:
# decode both key and value
current_batch = {}
while input_stream.size() > 0:
key = map_key_decoder(input_stream)
is_not_none = input_stream.read_byte()
if is_not_none:
value = map_value_decoder(input_stream)
else:
value = None
current_batch[key] = value
else:
# only decode key
current_batch = []
while input_stream.size() > 0:
key = map_key_decoder(input_stream)
current_batch.append(key)
return current_batch, new_iterator_token
def _append_raw(self, state_key, items, map_key_encoder, map_value_encoder):
output_stream = coder_impl.create_OutputStream()
output_stream.write_bigendian_int32(len(items))
for request_flag, map_key, map_value in items:
output_stream.write_byte(request_flag)
            # Not all coder implementations serialize the length of the bytes when the "nested"
            # param is set to "True", so we need to encode the length of the bytes manually.
tmp_out = coder_impl.create_OutputStream()
map_key_encoder(map_key, tmp_out)
serialized_data = tmp_out.get()
output_stream.write_bigendian_int32(len(serialized_data))
output_stream.write(serialized_data)
if request_flag == self.SET_VALUE:
tmp_out = coder_impl.create_OutputStream()
map_value_encoder(map_value, tmp_out)
serialized_data = tmp_out.get()
output_stream.write_bigendian_int32(len(serialized_data))
output_stream.write(serialized_data)
return self._underlying.append_raw(state_key, output_stream.get())
@staticmethod
def _convert_to_cache_key(state_key):
return state_key.SerializeToString()
class RemovableConcatIterator(collections.abc.Iterator):
def __init__(self, internal_map_state, first, second):
self._first = first
self._second = second
self._first_not_finished = True
self._internal_map_state = internal_map_state
self._mod_count = self._internal_map_state._mod_count
self._last_key = None
def __next__(self):
self._check_modification()
if self._first_not_finished:
try:
self._last_key, element = next(self._first)
return element
except StopIteration:
self._first_not_finished = False
return self.__next__()
else:
self._last_key, element = next(self._second)
return element
def remove(self):
"""
Remove the last element returned by this iterator.
"""
if self._last_key is None:
raise Exception("You need to call the '__next__' method before calling "
"this method.")
self._check_modification()
# Bypass the 'remove' method of the map state to avoid triggering the commit of the write
# cache.
if self._internal_map_state._cleared:
del self._internal_map_state._write_cache[self._last_key]
if len(self._internal_map_state._write_cache) == 0:
self._internal_map_state._is_empty = True
else:
self._internal_map_state._write_cache[self._last_key] = (False, None)
self._mod_count += 1
self._internal_map_state._mod_count += 1
self._last_key = None
def _check_modification(self):
if self._mod_count != self._internal_map_state._mod_count:
raise Exception("Concurrent modification detected. "
"You can not modify the map state when iterating it except using the "
"'remove' method of this iterator.")
class InternalSynchronousMapRuntimeState(object):
def __init__(self,
map_state_handler: CachingMapStateHandler,
state_key,
map_key_coder,
map_value_coder,
max_write_cache_entries):
self._map_state_handler = map_state_handler
self._state_key = state_key
self._map_key_coder = map_key_coder
if isinstance(map_key_coder, FieldCoder):
map_key_coder_impl = FlinkCoder(map_key_coder).get_impl()
else:
map_key_coder_impl = map_key_coder.get_impl()
self._map_key_encoder, self._map_key_decoder = \
self._get_encoder_and_decoder(map_key_coder_impl)
self._map_value_coder = map_value_coder
if isinstance(map_value_coder, FieldCoder):
map_value_coder_impl = FlinkCoder(map_value_coder).get_impl()
else:
map_value_coder_impl = map_value_coder.get_impl()
self._map_value_encoder, self._map_value_decoder = \
self._get_encoder_and_decoder(map_value_coder_impl)
self._write_cache = dict()
self._max_write_cache_entries = max_write_cache_entries
self._is_empty = None
self._cleared = False
self._mod_count = 0
def get(self, map_key):
if self._is_empty:
return None
if map_key in self._write_cache:
exists, value = self._write_cache[map_key]
if exists:
return value
else:
return None
if self._cleared:
return None
exists, value = self._map_state_handler.blocking_get(
self._state_key, map_key, self._map_key_encoder, self._map_value_decoder)
if exists:
return value
else:
return None
def put(self, map_key, map_value):
self._write_cache[map_key] = (True, map_value)
self._is_empty = False
self._mod_count += 1
if len(self._write_cache) >= self._max_write_cache_entries:
self.commit()
def put_all(self, dict_value):
for map_key, map_value in dict_value:
self._write_cache[map_key] = (True, map_value)
self._is_empty = False
self._mod_count += 1
if len(self._write_cache) >= self._max_write_cache_entries:
self.commit()
def remove(self, map_key):
if self._is_empty:
return
if self._cleared:
del self._write_cache[map_key]
if len(self._write_cache) == 0:
self._is_empty = True
else:
self._write_cache[map_key] = (False, None)
self._is_empty = None
self._mod_count += 1
if len(self._write_cache) >= self._max_write_cache_entries:
self.commit()
def contains(self, map_key):
if self._is_empty:
return False
if self.get(map_key) is None:
return False
else:
return True
def is_empty(self):
if self._is_empty is None:
if len(self._write_cache) > 0:
self.commit()
self._is_empty = self._map_state_handler.check_empty(self._state_key)
return self._is_empty
def clear(self):
self._cleared = True
self._is_empty = True
self._mod_count += 1
self._write_cache.clear()
def items(self):
return RemovableConcatIterator(
self,
self.write_cache_iterator(IterateType.ITEMS),
self.remote_data_iterator(IterateType.ITEMS))
def keys(self):
return RemovableConcatIterator(
self,
self.write_cache_iterator(IterateType.KEYS),
self.remote_data_iterator(IterateType.KEYS))
def values(self):
return RemovableConcatIterator(
self,
self.write_cache_iterator(IterateType.VALUES),
self.remote_data_iterator(IterateType.VALUES))
def commit(self):
to_await = None
if self._cleared:
to_await = self._map_state_handler.clear(self._state_key)
if self._write_cache:
append_items = []
for map_key, (exists, value) in self._write_cache.items():
if exists:
if value is not None:
append_items.append(
(CachingMapStateHandler.SET_VALUE, map_key, value))
else:
append_items.append((CachingMapStateHandler.SET_NONE, map_key, None))
else:
append_items.append((CachingMapStateHandler.DELETE, map_key, None))
self._write_cache.clear()
to_await = self._map_state_handler.extend(
self._state_key, append_items, self._map_key_encoder, self._map_value_encoder)
if to_await:
to_await.get()
self._write_cache.clear()
self._cleared = False
self._mod_count += 1
def write_cache_iterator(self, iterate_type):
return create_cache_iterator(self._write_cache, iterate_type)
def remote_data_iterator(self, iterate_type):
if self._cleared or self._is_empty:
return iter([])
else:
return self._map_state_handler.lazy_iterator(
self._state_key,
iterate_type,
self._map_key_decoder,
self._map_value_decoder,
self._write_cache)
@staticmethod
def _get_encoder_and_decoder(coder):
encoder = partial(coder.encode_to_stream, nested=True)
decoder = partial(coder.decode_from_stream, nested=True)
return encoder, decoder
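# Example (an illustrative sketch, not part of the original module; 'handler', 'state_key',
# 'key_coder' and 'value_coder' are hypothetical placeholders): writes first go to the local
# write cache and are flushed to the remote state backend once the cache reaches
# 'max_write_cache_entries' or when commit() is called.
#
#     >>> state = InternalSynchronousMapRuntimeState(
#     ...     handler, state_key, key_coder, value_coder, max_write_cache_entries=2)
#     >>> state.put("a", 1)       # buffered locally, _mod_count is incremented
#     >>> state.put("b", 2)       # cache is full now, commit() flushes both entries
#     >>> state.get("a")          # served remotely through the caching map state handler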
class SynchronousMapRuntimeState(SynchronousKvRuntimeState, InternalMapState):
def __init__(self,
name: str,
map_key_coder,
map_value_coder,
remote_state_backend: 'RemoteKeyedStateBackend'):
super(SynchronousMapRuntimeState, self).__init__(name, remote_state_backend)
self._map_key_coder = map_key_coder
self._map_value_coder = map_value_coder
def get_internal_state(self):
if self._internal_state is None:
self._internal_state = self._remote_state_backend._get_internal_map_state(
self.name,
self.namespace,
self._map_key_coder,
self._map_value_coder,
self._ttl_config,
self._cache_type)
return self._internal_state
def get(self, key):
return self.get_internal_state().get(key)
def put(self, key, value):
self.get_internal_state().put(key, value)
def put_all(self, dict_value):
self.get_internal_state().put_all(dict_value)
def remove(self, key):
self.get_internal_state().remove(key)
def contains(self, key):
return self.get_internal_state().contains(key)
def items(self):
return self.get_internal_state().items()
def keys(self):
return self.get_internal_state().keys()
def values(self):
return self.get_internal_state().values()
def is_empty(self):
return self.get_internal_state().is_empty()
def clear(self):
self.get_internal_state().clear()
class RemoteKeyedStateBackend(object):
"""
A keyed state backend provides methods for managing keyed state.
"""
MERGE_NAMESAPCES_MARK = "merge_namespaces"
def __init__(self,
state_handler,
key_coder,
namespace_coder,
state_cache_size,
map_state_read_cache_size,
map_state_write_cache_size):
self._state_handler = state_handler
self._map_state_handler = CachingMapStateHandler(
state_handler, map_state_read_cache_size)
self._key_coder_impl = key_coder.get_impl()
self.namespace_coder = namespace_coder
if namespace_coder:
self._namespace_coder_impl = namespace_coder.get_impl()
else:
self._namespace_coder_impl = None
self._state_cache_size = state_cache_size
self._map_state_write_cache_size = map_state_write_cache_size
self._all_states = {} # type: Dict[str, SynchronousKvRuntimeState]
self._internal_state_cache = LRUCache(self._state_cache_size, None)
self._internal_state_cache.set_on_evict(
lambda key, value: self.commit_internal_state(value))
self._current_key = None
self._encoded_current_key = None
self._clear_iterator_mark = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
transform_id="clear_iterators",
side_input_id="clear_iterators",
key=self._encoded_current_key))
def get_list_state(self, name, element_coder, ttl_config=None):
return self._wrap_internal_bag_state(
name,
element_coder,
SynchronousListRuntimeState,
SynchronousListRuntimeState,
ttl_config)
def get_value_state(self, name, value_coder, ttl_config=None):
return self._wrap_internal_bag_state(
name,
value_coder,
SynchronousValueRuntimeState,
SynchronousValueRuntimeState,
ttl_config)
def get_map_state(self, name, map_key_coder, map_value_coder, ttl_config=None):
if name in self._all_states:
self.validate_map_state(name, map_key_coder, map_value_coder)
return self._all_states[name]
map_state = SynchronousMapRuntimeState(name, map_key_coder, map_value_coder, self)
if ttl_config is not None:
map_state.enable_time_to_live(ttl_config)
self._all_states[name] = map_state
return map_state
def get_reducing_state(self, name, coder, reduce_function, ttl_config=None):
return self._wrap_internal_bag_state(
name,
coder,
SynchronousReducingRuntimeState,
partial(SynchronousReducingRuntimeState, reduce_function=reduce_function),
ttl_config)
def get_aggregating_state(self, name, coder, agg_function, ttl_config=None):
return self._wrap_internal_bag_state(
name,
coder,
SynchronousAggregatingRuntimeState,
partial(SynchronousAggregatingRuntimeState, agg_function=agg_function),
ttl_config)
def validate_state(self, name, coder, expected_type):
if name in self._all_states:
state = self._all_states[name]
if not isinstance(state, expected_type):
raise Exception("The state name '%s' is already in use and not a %s."
% (name, expected_type))
if state._value_coder != coder:
raise Exception("State name corrupted: %s" % name)
def validate_map_state(self, name, map_key_coder, map_value_coder):
if name in self._all_states:
state = self._all_states[name]
if not isinstance(state, SynchronousMapRuntimeState):
raise Exception("The state name '%s' is already in use and not a map state."
% name)
if state._map_key_coder != map_key_coder or \
state._map_value_coder != map_value_coder:
raise Exception("State name corrupted: %s" % name)
def _wrap_internal_bag_state(
self, name, element_coder, wrapper_type, wrap_method, ttl_config):
if name in self._all_states:
self.validate_state(name, element_coder, wrapper_type)
return self._all_states[name]
wrapped_state = wrap_method(name, element_coder, self)
if ttl_config is not None:
wrapped_state.enable_time_to_live(ttl_config)
self._all_states[name] = wrapped_state
return wrapped_state
def _get_internal_bag_state(self, name, namespace, element_coder, ttl_config):
encoded_namespace = self._encode_namespace(namespace)
cached_state = self._internal_state_cache.get(
(name, self._encoded_current_key, encoded_namespace))
if cached_state is not None:
return cached_state
        # The created internal state would not be put into the internal state cache
        # immediately. The internal state cache is only updated when the current key changes.
        # The reason is that the state cache size may be smaller than the number of activated
        # states (i.e. the states with the current key).
if isinstance(element_coder, FieldCoder):
element_coder = FlinkCoder(element_coder)
state_spec = userstate.BagStateSpec(name, element_coder)
internal_state = self._create_bag_state(state_spec, encoded_namespace, ttl_config)
return internal_state
def _get_internal_map_state(
self, name, namespace, map_key_coder, map_value_coder, ttl_config, cache_type):
encoded_namespace = self._encode_namespace(namespace)
cached_state = self._internal_state_cache.get(
(name, self._encoded_current_key, encoded_namespace))
if cached_state is not None:
return cached_state
internal_map_state = self._create_internal_map_state(
name, encoded_namespace, map_key_coder, map_value_coder, ttl_config, cache_type)
return internal_map_state
def _create_bag_state(self, state_spec: userstate.StateSpec, encoded_namespace, ttl_config) \
-> userstate.AccumulatingRuntimeState:
if isinstance(state_spec, userstate.BagStateSpec):
bag_state = SynchronousBagRuntimeState(
self._state_handler,
state_key=self.get_bag_state_key(
state_spec.name, self._encoded_current_key, encoded_namespace, ttl_config),
value_coder=state_spec.coder)
return bag_state
else:
raise NotImplementedError(state_spec)
def _create_internal_map_state(
self, name, encoded_namespace, map_key_coder, map_value_coder, ttl_config, cache_type):
        # Currently the `beam_fn_api.proto` does not support MapState, so we use the
        # `MultimapSideInput` message to mark the state as a MapState for now.
state_proto = pb2_StateDescriptor()
state_proto.state_name = name
if ttl_config is not None:
state_proto.state_ttl_config.CopyFrom(ttl_config._to_proto())
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
transform_id="",
window=encoded_namespace,
side_input_id=base64.b64encode(state_proto.SerializeToString()),
key=self._encoded_current_key))
if cache_type == SynchronousKvRuntimeState.CacheType.DISABLE_CACHE:
write_cache_size = 0
else:
write_cache_size = self._map_state_write_cache_size
return InternalSynchronousMapRuntimeState(
self._map_state_handler,
state_key,
map_key_coder,
map_value_coder,
write_cache_size)
def _encode_namespace(self, namespace):
if namespace is not None:
encoded_namespace = self._namespace_coder_impl.encode(namespace)
else:
encoded_namespace = b''
return encoded_namespace
def cache_internal_state(self, encoded_key, internal_kv_state: SynchronousKvRuntimeState):
encoded_old_namespace = self._encode_namespace(internal_kv_state.namespace)
self._internal_state_cache.put(
(internal_kv_state.name, encoded_key, encoded_old_namespace),
internal_kv_state.get_internal_state())
def set_current_key(self, key):
if key == self._current_key:
return
encoded_old_key = self._encoded_current_key
for state_name, state_obj in self._all_states.items():
if self._state_cache_size > 0:
# cache old internal state
self.cache_internal_state(encoded_old_key, state_obj)
state_obj.namespace = None
state_obj._internal_state = None
self._current_key = key
self._encoded_current_key = self._key_coder_impl.encode(self._current_key)
def get_current_key(self):
return self._current_key
def commit(self):
for internal_state in self._internal_state_cache:
self.commit_internal_state(internal_state)
for name, state in self._all_states.items():
if (name, self._encoded_current_key, self._encode_namespace(state.namespace)) \
not in self._internal_state_cache:
self.commit_internal_state(state._internal_state)
def clear_cached_iterators(self):
if self._map_state_handler.get_cached_iterators_num() > 0:
self._clear_iterator_mark.multimap_side_input.key = self._encoded_current_key
self._map_state_handler.clear(self._clear_iterator_mark)
def merge_namespaces(self, state: SynchronousMergingRuntimeState, target, sources, ttl_config):
for source in sources:
state.set_current_namespace(source)
self.commit_internal_state(state.get_internal_state())
state.set_current_namespace(target)
self.commit_internal_state(state.get_internal_state())
encoded_target_namespace = self._encode_namespace(target)
encoded_namespaces = [self._encode_namespace(source) for source in sources]
self.clear_state_cache(state, [encoded_target_namespace] + encoded_namespaces)
state_key = self.get_bag_state_key(
state.name, self._encoded_current_key, encoded_target_namespace, ttl_config)
state_key.bag_user_state.transform_id = self.MERGE_NAMESAPCES_MARK
encoded_namespaces_writer = BytesIO()
encoded_namespaces_writer.write(len(sources).to_bytes(4, 'big'))
for encoded_namespace in encoded_namespaces:
encoded_namespaces_writer.write(encoded_namespace)
sources_bytes = encoded_namespaces_writer.getvalue()
to_await = self._map_state_handler._underlying.append_raw(state_key, sources_bytes)
if to_await:
to_await.get()
def clear_state_cache(self, state: SynchronousMergingRuntimeState, encoded_namespaces):
name = state.name
for encoded_namespace in encoded_namespaces:
if (name, self._encoded_current_key, encoded_namespace) in self._internal_state_cache:
# commit and clear the write cache
self._internal_state_cache.evict(
(name, self._encoded_current_key, encoded_namespace))
            # currently all the SynchronousMergingRuntimeState implementations are based on
            # bag state
state_key = self.get_bag_state_key(
name, self._encoded_current_key, encoded_namespace, None)
            # clear the read cache; the read cache is shared between the map state handler and
            # the bag state handler, so we can use the map state handler here.
self._map_state_handler.clear_read_cache(state_key)
def get_bag_state_key(self, name, encoded_key, encoded_namespace, ttl_config):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
state_proto = StateDescriptor()
state_proto.state_name = name
if ttl_config is not None:
state_proto.state_ttl_config.CopyFrom(ttl_config._to_proto())
return beam_fn_api_pb2.StateKey(
bag_user_state=beam_fn_api_pb2.StateKey.BagUserState(
transform_id="",
window=encoded_namespace,
user_state_id=base64.b64encode(state_proto.SerializeToString()),
key=encoded_key))
@staticmethod
def commit_internal_state(internal_state):
if internal_state is not None:
internal_state.commit()
        # reset the status of the internal state to reuse the object across bundles
if isinstance(internal_state, SynchronousBagRuntimeState):
internal_state._cleared = False
internal_state._added_elements = []
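# Example (an illustrative sketch, not part of the original module; 'backend', 'record_key'
# and 'value_coder' are hypothetical placeholders, and the standard ValueState API is
# assumed): a typical per-record interaction with the keyed state backend.
#
#     >>> backend.set_current_key(record_key)     # caches the internal states of the old key
#     >>> cnt = backend.get_value_state("cnt", value_coder)
#     >>> cnt.update((cnt.value() or 0) + 1)
#     >>> backend.commit()                        # flush the cached internal states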
class SynchronousReadOnlyBroadcastRuntimeState(InternalReadOnlyBroadcastState):
def __init__(self, name: str, internal_map_state: "InternalSynchronousMapRuntimeState"):
self._name = name
self._internal_map_state = internal_map_state
def get(self, key):
return self._internal_map_state.get(key)
def contains(self, key) -> bool:
return self._internal_map_state.contains(key)
def items(self):
return self._internal_map_state.items()
def keys(self):
return self._internal_map_state.keys()
def values(self):
return self._internal_map_state.values()
def is_empty(self):
return self._internal_map_state.is_empty()
def clear(self):
return self._internal_map_state.clear()
class SynchronousBroadcastRuntimeState(
SynchronousReadOnlyBroadcastRuntimeState, InternalBroadcastState
):
def __init__(self, name: str, internal_map_state: "InternalSynchronousMapRuntimeState"):
super(SynchronousBroadcastRuntimeState, self).__init__(name, internal_map_state)
def put(self, key, value):
self._internal_map_state.put(key, value)
def put_all(self, dict_value):
self._internal_map_state.put_all(dict_value)
def remove(self, key):
self._internal_map_state.remove(key)
def commit(self):
self._internal_map_state.commit()
def to_read_only_broadcast_state(self) -> "SynchronousReadOnlyBroadcastRuntimeState":
return SynchronousReadOnlyBroadcastRuntimeState(self._name, self._internal_map_state)
class OperatorStateBackend(OperatorStateStore, ABC):
@abstractmethod
def commit(self):
pass
class RemoteOperatorStateBackend(OperatorStateBackend):
def __init__(
self, state_handler, state_cache_size, map_state_read_cache_size, map_state_write_cache_size
):
self._state_handler = state_handler
self._state_cache_size = state_cache_size
        # NOTE: if the user stores a state into a class member, that state won't actually be
        # evicted from memory (because its counter > 0)
self._state_cache = LRUCache(state_cache_size, None)
self._state_cache.set_on_evict(lambda _, state: state.commit())
self._map_state_read_cache_size = map_state_read_cache_size
self._map_state_write_cache_size = map_state_write_cache_size
self._map_state_handler = CachingMapStateHandler(state_handler, map_state_read_cache_size)
def get_broadcast_state(
self, state_descriptor: MapStateDescriptor
) -> 'SynchronousBroadcastRuntimeState':
state_name = state_descriptor.name
map_coder = cast(MapCoder, from_type_info(state_descriptor.type_info)) # type: MapCoder
key_coder = map_coder._key_coder
value_coder = map_coder._value_coder
if state_name in self._state_cache:
self._validate_broadcast_state(state_name, key_coder, value_coder)
return self._state_cache.get(state_name)
state_proto = pb2_StateDescriptor()
state_proto.state_name = state_name
# Currently, MultimapKeysSideInput is used for BroadcastState
state_key = beam_fn_api_pb2.StateKey(
multimap_keys_side_input=beam_fn_api_pb2.StateKey.MultimapKeysSideInput(
transform_id="",
window=bytes(),
side_input_id=base64.b64encode(state_proto.SerializeToString()),
)
)
internal_map_state = InternalSynchronousMapRuntimeState(
self._map_state_handler,
state_key,
key_coder,
value_coder,
self._map_state_write_cache_size,
)
broadcast_state = SynchronousBroadcastRuntimeState(
state_descriptor.name, internal_map_state
)
self._state_cache.put(state_name, broadcast_state)
return broadcast_state
def commit(self):
for state in self._state_cache:
cast(SynchronousBroadcastRuntimeState, state).commit()
def _validate_broadcast_state(self, name, key_coder, value_coder):
if name in self._state_cache:
state = cast(SynchronousBroadcastRuntimeState, self._state_cache.get(name))
if (
key_coder != state._internal_map_state._map_key_coder
or value_coder != state._internal_map_state._map_value_coder
):
raise Exception("State name corrupted: %s" % name)
| 53,898 | 39.223134 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/coder_impl_slow.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import pickle
from abc import ABC, abstractmethod
from typing import List
import cloudpickle
import avro.schema as avro_schema
from pyflink.common import Row, RowKind
from pyflink.common.time import Instant
from pyflink.datastream.window import TimeWindow, CountWindow
from pyflink.fn_execution.ResettableIO import ResettableIO
from pyflink.fn_execution.formats.avro import FlinkAvroDecoder, FlinkAvroDatumReader, \
FlinkAvroBufferWrapper, FlinkAvroEncoder, FlinkAvroDatumWriter
from pyflink.fn_execution.stream_slow import InputStream, OutputStream
from pyflink.table.utils import pandas_to_arrow, arrow_to_pandas
ROW_KIND_BIT_SIZE = 2
class LengthPrefixBaseCoderImpl(ABC):
"""
    LengthPrefixBaseCoder is used in Operations; the other coders are used as the field coder
    of LengthPrefixBaseCoder.
"""
def __init__(self, field_coder: 'FieldCoderImpl'):
self._field_coder = field_coder
self._data_out_stream = OutputStream()
def _write_data_to_output_stream(self, out_stream: OutputStream):
out_stream.write_var_int64(self._data_out_stream.size())
out_stream.write(self._data_out_stream.get())
self._data_out_stream.clear()
class FieldCoderImpl(ABC):
@abstractmethod
def encode_to_stream(self, value, out_stream: OutputStream):
"""
Encodes `value` to the output stream.
:param value: The output data
:param out_stream: Output Stream
"""
pass
@abstractmethod
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
"""
Decodes data from the input stream.
:param in_stream: Input Stream
        :param length: The number of bytes of the input stream that will be decoded. The default
        value is 0, which means the coder won't make use of the length when decoding the data
        from the input stream.
:return: The decoded Data.
"""
pass
def encode(self, value):
out = OutputStream()
self.encode_to_stream(value, out)
return out.get()
def decode(self, encoded):
return self.decode_from_stream(InputStream(encoded), len(encoded))
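# Example (an illustrative round trip, not part of the original module): every concrete
# FieldCoderImpl can be exercised through the convenience encode()/decode() helpers above.
#
#     >>> coder = BigIntCoderImpl()
#     >>> data = coder.encode(42)        # bytes produced via encode_to_stream
#     >>> coder.decode(data)             # decode_from_stream on an InputStream
#     42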
class IterableCoderImpl(LengthPrefixBaseCoderImpl):
"""
    Encodes iterable data to the output stream. The output mode decides whether to write a
    special end message 0x00 to the output stream after encoding the data.
"""
def __init__(self, field_coder: 'FieldCoderImpl', separated_with_end_message: bool):
super(IterableCoderImpl, self).__init__(field_coder)
self._separated_with_end_message = separated_with_end_message
def encode_to_stream(self, value: List, out_stream: OutputStream):
if value:
for item in value:
self._field_coder.encode_to_stream(item, self._data_out_stream)
self._write_data_to_output_stream(out_stream)
# write end message
if self._separated_with_end_message:
out_stream.write_var_int64(1)
out_stream.write_byte(0x00)
def decode_from_stream(self, in_stream: InputStream):
while in_stream.size() > 0:
yield self._field_coder.decode_from_stream(in_stream, in_stream.read_var_int64())
class ValueCoderImpl(LengthPrefixBaseCoderImpl):
"""
Encodes a single data to output stream.
"""
def __init__(self, field_coder: 'FieldCoderImpl'):
super(ValueCoderImpl, self).__init__(field_coder)
def encode_to_stream(self, value, out_stream: OutputStream):
self._field_coder.encode_to_stream(value, self._data_out_stream)
self._write_data_to_output_stream(out_stream)
def decode_from_stream(self, in_stream: InputStream):
return self._field_coder.decode_from_stream(in_stream, in_stream.read_var_int64())
class MaskUtils:
"""
A util class used to encode mask value.
"""
def __init__(self, field_count):
self._field_count = field_count
# the row kind uses the first 2 bits of the bitmap, the remaining bits are used for null
# mask, for more details refer to:
# https://github.com/apache/flink/blob/master/flink-core/src/main/java/org/apache/flink/api/java/typeutils/runtime/RowSerializer.java
self._leading_complete_bytes_num = (self._field_count + ROW_KIND_BIT_SIZE) // 8
self._remaining_bits_num = (self._field_count + ROW_KIND_BIT_SIZE) % 8
self.null_mask_search_table = self.generate_null_mask_search_table()
self.null_byte_search_table = (0x80, 0x40, 0x20, 0x10, 0x08, 0x04, 0x02, 0x01)
self.row_kind_search_table = [0x00, 0x80, 0x40, 0xC0]
@staticmethod
def generate_null_mask_search_table():
"""
Each bit of one byte represents if the column at the corresponding position is None or not,
e.g. 0x84 represents the first column and the sixth column are None.
"""
null_mask = []
for b in range(256):
every_num_null_mask = [(b & 0x80) > 0, (b & 0x40) > 0, (b & 0x20) > 0, (b & 0x10) > 0,
(b & 0x08) > 0, (b & 0x04) > 0, (b & 0x02) > 0, (b & 0x01) > 0]
null_mask.append(tuple(every_num_null_mask))
return tuple(null_mask)
def write_mask(self, value, row_kind_value, out_stream: OutputStream):
field_pos = 0
null_byte_search_table = self.null_byte_search_table
remaining_bits_num = self._remaining_bits_num
# first byte contains the row kind bits
b = self.row_kind_search_table[row_kind_value]
for i in range(0, 8 - ROW_KIND_BIT_SIZE):
if field_pos + i < len(value) and value[field_pos + i] is None:
b |= null_byte_search_table[i + ROW_KIND_BIT_SIZE]
field_pos += 8 - ROW_KIND_BIT_SIZE
out_stream.write_byte(b)
for _ in range(1, self._leading_complete_bytes_num):
b = 0x00
for i in range(0, 8):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
field_pos += 8
out_stream.write_byte(b)
if self._leading_complete_bytes_num >= 1 and remaining_bits_num:
b = 0x00
for i in range(remaining_bits_num):
if value[field_pos + i] is None:
b |= null_byte_search_table[i]
out_stream.write_byte(b)
def read_mask(self, in_stream: InputStream):
mask = []
mask_search_table = self.null_mask_search_table
remaining_bits_num = self._remaining_bits_num
for _ in range(self._leading_complete_bytes_num):
b = in_stream.read_byte()
mask.extend(mask_search_table[b])
if remaining_bits_num:
b = in_stream.read_byte()
mask.extend(mask_search_table[b][0:remaining_bits_num])
return mask
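# Worked example (illustrative): for a row with 3 fields where only the second field is None
# and the row kind is INSERT (value 0), the mask fits into a single byte:
#
#     bits (MSB first): [row kind: 00][field 0: 0][field 1: 1][field 2: 0][padding: 000]
#     byte value      : 0b00010000 == 0x10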
class FlattenRowCoderImpl(FieldCoderImpl):
"""
    A coder for a flattened row (List) object (without field names; the row kind value is
    always 0).
"""
def __init__(self, field_coders: List[FieldCoderImpl]):
self._field_coders = field_coders
self._field_count = len(field_coders)
self._mask_utils = MaskUtils(self._field_count)
def encode_to_stream(self, value, out_stream: OutputStream):
if not isinstance(value, List):
raise TypeError('Expected list, got {0}'.format(type(value)))
# encode mask value
self._mask_utils.write_mask(value, 0, out_stream)
# encode every field value
for i in range(self._field_count):
item = value[i]
if item is not None:
self._field_coders[i].encode_to_stream(item, out_stream)
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
row_kind_and_null_mask = self._mask_utils.read_mask(in_stream)
return [None if row_kind_and_null_mask[idx + ROW_KIND_BIT_SIZE] else
self._field_coders[idx].decode_from_stream(in_stream)
for idx in range(0, self._field_count)]
def __repr__(self):
return 'FlattenRowCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class RowCoderImpl(FieldCoderImpl):
"""
A coder for `Row` object.
"""
def __init__(self, field_coders, field_names):
self._field_coders = field_coders
self._field_count = len(field_coders)
self._field_names = field_names
self._mask_utils = MaskUtils(self._field_count)
def encode_to_stream(self, value: Row, out_stream: OutputStream):
# encode mask value
values = value.get_fields_by_names(self._field_names)
self._mask_utils.write_mask(values, value.get_row_kind().value, out_stream)
# encode every field value
for i in range(self._field_count):
item = values[i]
if item is not None:
self._field_coders[i].encode_to_stream(item, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0) -> Row:
row_kind_and_null_mask = self._mask_utils.read_mask(in_stream)
fields = [None if row_kind_and_null_mask[idx + ROW_KIND_BIT_SIZE] else
self._field_coders[idx].decode_from_stream(in_stream)
for idx in range(0, self._field_count)]
# compute the row_kind value
row_kind_value = 0
for i in range(ROW_KIND_BIT_SIZE):
row_kind_value += int(row_kind_and_null_mask[i]) * 2 ** i
row = Row(*fields)
row.set_field_names(self._field_names)
row.set_row_kind(RowKind(row_kind_value))
return row
def __repr__(self):
return 'RowCoderImpl[%s, %s]' % \
(', '.join(str(c) for c in self._field_coders), self._field_names)
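# Example (an illustrative round trip, not part of the original module): the row kind and the
# null mask are written first, followed by the non-null field values in declaration order.
#
#     >>> coder = RowCoderImpl([BigIntCoderImpl(), CharCoderImpl()], ['id', 'name'])
#     >>> data = coder.encode(Row(id=1, name='flink'))
#     >>> row = coder.decode(data)       # carries the same fields and RowKind.INSERT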
class ArrowCoderImpl(FieldCoderImpl):
"""
A coder for arrow format data.
"""
def __init__(self, schema, row_type, timezone):
self._schema = schema
self._field_types = row_type.field_types()
self._timezone = timezone
self._resettable_io = ResettableIO()
self._batch_reader = ArrowCoderImpl._load_from_stream(self._resettable_io)
def encode_to_stream(self, cols, out_stream: OutputStream):
import pyarrow as pa
self._resettable_io.set_output_stream(out_stream)
batch_writer = pa.RecordBatchStreamWriter(self._resettable_io, self._schema)
batch_writer.write_batch(
pandas_to_arrow(self._schema, self._timezone, self._field_types, cols))
def decode_from_stream(self, in_stream: InputStream, length=0):
return self.decode_one_batch_from_stream(in_stream, length)
def decode_one_batch_from_stream(self, in_stream: InputStream, size: int) -> List:
self._resettable_io.set_input_bytes(in_stream.read(size))
# there is only one arrow batch in the underlying input stream
return arrow_to_pandas(self._timezone, self._field_types, [next(self._batch_reader)])
@staticmethod
def _load_from_stream(stream):
import pyarrow as pa
while stream.readable():
reader = pa.ipc.open_stream(stream)
yield reader.read_next_batch()
def __repr__(self):
return 'ArrowCoderImpl[%s]' % self._schema
class OverWindowArrowCoderImpl(FieldCoderImpl):
"""
A coder for over window with arrow format data.
The data structure: [window data][arrow format data].
"""
def __init__(self, arrow_coder_impl: ArrowCoderImpl):
self._arrow_coder = arrow_coder_impl
self._int_coder = IntCoderImpl()
def encode_to_stream(self, cols, out_stream: OutputStream):
self._arrow_coder.encode_to_stream(cols, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
window_num = self._int_coder.decode_from_stream(in_stream)
length -= 4
window_boundaries_and_arrow_data = []
for _ in range(window_num):
window_size = self._int_coder.decode_from_stream(in_stream)
length -= 4
window_boundaries_and_arrow_data.append(
[self._int_coder.decode_from_stream(in_stream)
for _ in range(window_size)])
length -= 4 * window_size
window_boundaries_and_arrow_data.append(
self._arrow_coder.decode_one_batch_from_stream(in_stream, length))
return window_boundaries_and_arrow_data
def __repr__(self):
return 'OverWindowArrowCoderImpl[%s]' % self._arrow_coder
class TinyIntCoderImpl(FieldCoderImpl):
"""
A coder for tiny int value (from -128 to 127).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int8(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_int8()
class SmallIntCoderImpl(FieldCoderImpl):
"""
A coder for small int value (from -32,768 to 32,767).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int16(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_int16()
class IntCoderImpl(FieldCoderImpl):
"""
A coder for int value (from -2,147,483,648 to 2,147,483,647).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_int32()
class BigIntCoderImpl(FieldCoderImpl):
"""
A coder for big int value (from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int64(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_int64()
class BooleanCoderImpl(FieldCoderImpl):
"""
A coder for a boolean value.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_byte(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
        return bool(in_stream.read_byte())
class FloatCoderImpl(FieldCoderImpl):
"""
A coder for a float value (4-byte single precision floating point number).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_float(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_float()
class DoubleCoderImpl(FieldCoderImpl):
"""
A coder for a double value (8-byte double precision floating point number).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_double(value)
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_double()
class BinaryCoderImpl(FieldCoderImpl):
"""
A coder for a bytes value.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_bytes(value, len(value))
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_bytes()
class CharCoderImpl(FieldCoderImpl):
"""
A coder for a str value.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
bytes_value = value.encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
def decode_from_stream(self, in_stream: InputStream, length=0):
return in_stream.read_bytes().decode("utf-8")
class DecimalCoderImpl(FieldCoderImpl):
"""
A coder for a decimal value (with fixed precision and scale).
"""
def __init__(self, precision, scale):
self.context = decimal.Context(prec=precision)
self.scale_format = decimal.Decimal(10) ** -scale
def encode_to_stream(self, value, out_stream: OutputStream):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
value = value.quantize(self.scale_format)
bytes_value = str(value).encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
decimal.setcontext(user_context)
def decode_from_stream(self, in_stream: InputStream, length=0):
user_context = decimal.getcontext()
decimal.setcontext(self.context)
value = decimal.Decimal(in_stream.read_bytes().decode("utf-8")).quantize(self.scale_format)
decimal.setcontext(user_context)
return value
class BigDecimalCoderImpl(FieldCoderImpl):
"""
A coder for a big decimal value (without fixed precision and scale).
"""
def encode_to_stream(self, value, out_stream: OutputStream):
bytes_value = str(value).encode("utf-8")
out_stream.write_bytes(bytes_value, len(bytes_value))
def decode_from_stream(self, in_stream: InputStream, length=0):
return decimal.Decimal(in_stream.read_bytes().decode("utf-8"))
class DateCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.date value.
"""
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(self.date_to_internal(value))
def decode_from_stream(self, in_stream: InputStream, length=0):
value = in_stream.read_int32()
return self.internal_to_date(value)
def date_to_internal(self, d):
return d.toordinal() - self.EPOCH_ORDINAL
def internal_to_date(self, v):
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
class TimeCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.time value.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(self.time_to_internal(value))
def decode_from_stream(self, in_stream: InputStream, length=0):
value = in_stream.read_int32()
return self.internal_to_time(value)
@staticmethod
def time_to_internal(t):
milliseconds = (t.hour * 3600000
+ t.minute * 60000
+ t.second * 1000
+ t.microsecond // 1000)
return milliseconds
@staticmethod
def internal_to_time(v):
seconds, milliseconds = divmod(v, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, milliseconds * 1000)
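# Worked example (illustrative): a time value is stored as the number of milliseconds since
# midnight, e.g. 10:30:15.500000 becomes
#
#     10 * 3600000 + 30 * 60000 + 15 * 1000 + 500 == 37815500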
class TimestampCoderImpl(FieldCoderImpl):
"""
A coder for a datetime.datetime value.
"""
def __init__(self, precision):
self.precision = precision
def is_compact(self):
return self.precision <= 3
def encode_to_stream(self, value, out_stream: OutputStream):
milliseconds, nanoseconds = self.timestamp_to_internal(value)
if self.is_compact():
assert nanoseconds == 0
out_stream.write_int64(milliseconds)
else:
out_stream.write_int64(milliseconds)
out_stream.write_int32(nanoseconds)
def decode_from_stream(self, in_stream: InputStream, length=0):
if self.is_compact():
milliseconds = in_stream.read_int64()
nanoseconds = 0
else:
milliseconds = in_stream.read_int64()
nanoseconds = in_stream.read_int32()
return self.internal_to_timestamp(milliseconds, nanoseconds)
@staticmethod
def timestamp_to_internal(timestamp):
seconds = int(timestamp.replace(tzinfo=datetime.timezone.utc).timestamp())
microseconds_of_second = timestamp.microsecond
milliseconds = seconds * 1000 + microseconds_of_second // 1000
nanoseconds = microseconds_of_second % 1000 * 1000
return milliseconds, nanoseconds
def internal_to_timestamp(self, milliseconds, nanoseconds):
second, microsecond = (milliseconds // 1000,
milliseconds % 1000 * 1000 + nanoseconds // 1000)
return datetime.datetime.utcfromtimestamp(second).replace(microsecond=microsecond)
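# Example (an illustrative sketch, not part of the original module): with precision <= 3 only
# the millisecond component is written (8 bytes); higher precisions also write the
# nanosecond-of-millisecond component (4 additional bytes).
#
#     >>> coder = TimestampCoderImpl(precision=3)
#     >>> data = coder.encode(datetime.datetime(1970, 1, 1, 0, 0, 1))   # 1000 ms, 0 ns
#     >>> len(data)
#     8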
class LocalZonedTimestampCoderImpl(TimestampCoderImpl):
"""
A coder for a datetime.datetime with time zone value.
"""
def __init__(self, precision, timezone):
super(LocalZonedTimestampCoderImpl, self).__init__(precision)
self.timezone = timezone
def internal_to_timestamp(self, milliseconds, nanoseconds):
return self.timezone.localize(
super(LocalZonedTimestampCoderImpl, self).internal_to_timestamp(
milliseconds, nanoseconds))
class InstantCoderImpl(FieldCoderImpl):
"""
A coder for Instant.
"""
def __init__(self):
self._null_seconds = -9223372036854775808
self._null_nanos = -2147483648
def encode_to_stream(self, value: Instant, out_stream: OutputStream):
if value is None:
out_stream.write_int64(self._null_seconds)
out_stream.write_int32(self._null_nanos)
else:
out_stream.write_int64(value.seconds)
out_stream.write_int32(value.nanos)
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
seconds = in_stream.read_int64()
nanos = in_stream.read_int32()
if seconds == self._null_seconds and nanos == self._null_nanos:
return None
else:
return Instant(seconds, nanos)
class CloudPickleCoderImpl(FieldCoderImpl):
"""
    A coder used with cloudpickle for all kinds of Python objects.
"""
def __init__(self):
self.field_coder = BinaryCoderImpl()
def encode_to_stream(self, value, out_stream: OutputStream):
coded_data = cloudpickle.dumps(value)
self.field_coder.encode_to_stream(coded_data, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
return self._decode_one_value_from_stream(in_stream)
def _decode_one_value_from_stream(self, in_stream: InputStream):
real_data = self.field_coder.decode_from_stream(in_stream)
value = cloudpickle.loads(real_data)
return value
def __repr__(self) -> str:
return 'CloudPickleCoderImpl[%s]' % str(self.field_coder)
class PickleCoderImpl(FieldCoderImpl):
"""
    A coder used with pickle for all kinds of Python objects.
"""
def __init__(self):
self.field_coder = BinaryCoderImpl()
def encode_to_stream(self, value, out_stream: OutputStream):
coded_data = pickle.dumps(value)
self.field_coder.encode_to_stream(coded_data, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
real_data = self.field_coder.decode_from_stream(in_stream)
value = pickle.loads(real_data)
return value
def __repr__(self) -> str:
return 'PickleCoderImpl[%s]' % str(self.field_coder)
class TupleCoderImpl(FieldCoderImpl):
"""
A coder for a tuple value.
"""
def __init__(self, field_coders):
self._field_coders = field_coders
self._field_count = len(field_coders)
def encode_to_stream(self, value, out_stream: OutputStream):
field_coders = self._field_coders
for i in range(self._field_count):
field_coders[i].encode_to_stream(value[i], out_stream)
def decode_from_stream(self, stream: InputStream, length=0):
decoded_list = [field_coder.decode_from_stream(stream)
for field_coder in self._field_coders]
return (*decoded_list,)
def __repr__(self) -> str:
return 'TupleCoderImpl[%s]' % ', '.join(str(c) for c in self._field_coders)
class GenericArrayCoderImpl(FieldCoderImpl):
"""
    A coder for an object array value (the elements can be any kind of Python object).
"""
def __init__(self, elem_coder: FieldCoderImpl):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(len(value))
for elem in value:
if elem is None:
out_stream.write_byte(False)
else:
out_stream.write_byte(True)
self._elem_coder.encode_to_stream(elem, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
size = in_stream.read_int32()
elements = [self._elem_coder.decode_from_stream(in_stream)
if in_stream.read_byte() else None for _ in range(size)]
return elements
def __repr__(self):
return 'GenericArrayCoderImpl[%s]' % repr(self._elem_coder)
class PrimitiveArrayCoderImpl(FieldCoderImpl):
"""
    A coder for a primitive array value (the elements of the array won't be null).
"""
def __init__(self, elem_coder: FieldCoderImpl):
self._elem_coder = elem_coder
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int32(len(value))
for elem in value:
self._elem_coder.encode_to_stream(elem, out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
size = in_stream.read_int32()
elements = [self._elem_coder.decode_from_stream(in_stream) for _ in range(size)]
return elements
def __repr__(self):
return 'PrimitiveArrayCoderImpl[%s]' % repr(self._elem_coder)
class MapCoderImpl(FieldCoderImpl):
"""
    A coder for a map value (a dict whose keys share one type and whose values share one type).
"""
def __init__(self, key_coder: FieldCoderImpl, value_coder: FieldCoderImpl):
self._key_coder = key_coder
self._value_coder = value_coder
def encode_to_stream(self, map_value, out_stream: OutputStream):
out_stream.write_int32(len(map_value))
for key in map_value:
self._key_coder.encode_to_stream(key, out_stream)
value = map_value[key]
if value is None:
out_stream.write_byte(True)
else:
out_stream.write_byte(False)
self._value_coder.encode_to_stream(map_value[key], out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
size = in_stream.read_int32()
map_value = {}
for _ in range(size):
key = self._key_coder.decode_from_stream(in_stream)
is_null = in_stream.read_byte()
if is_null:
map_value[key] = None
else:
value = self._value_coder.decode_from_stream(in_stream)
map_value[key] = value
return map_value
def __repr__(self):
return 'MapCoderImpl[%s]' % ' : '.join([repr(self._key_coder), repr(self._value_coder)])
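# Example (an illustrative round trip, not part of the original module): each entry is written
# as the key, a null flag for the value, and the value itself when it is not None.
#
#     >>> coder = MapCoderImpl(CharCoderImpl(), BigIntCoderImpl())
#     >>> data = coder.encode({"a": 1, "b": None})
#     >>> coder.decode(data) == {"a": 1, "b": None}
#     True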
class TimeWindowCoderImpl(FieldCoderImpl):
"""
A coder for TimeWindow.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int64(value.start)
out_stream.write_int64(value.end)
def decode_from_stream(self, in_stream: InputStream, length=0):
start = in_stream.read_int64()
end = in_stream.read_int64()
return TimeWindow(start, end)
class CountWindowCoderImpl(FieldCoderImpl):
"""
A coder for CountWindow.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_int64(value.id)
def decode_from_stream(self, in_stream: InputStream, length=0):
return CountWindow(in_stream.read_int64())
class GlobalWindowCoderImpl(FieldCoderImpl):
"""
    A coder for GlobalWindow.
"""
def encode_to_stream(self, value, out_stream: OutputStream):
out_stream.write_byte(0)
def decode_from_stream(self, in_stream: InputStream, length=0):
in_stream.read_byte()
return GlobalWindowCoderImpl()
class DataViewFilterCoderImpl(FieldCoderImpl):
"""
A coder for data view filter.
"""
def __init__(self, udf_data_view_specs):
self._udf_data_view_specs = udf_data_view_specs
self._pickle_coder = PickleCoderImpl()
def encode_to_stream(self, value, out_stream: OutputStream):
self._pickle_coder.encode_to_stream(self._filter_data_views(value), out_stream)
def decode_from_stream(self, in_stream: InputStream, length=0):
return self._pickle_coder.decode_from_stream(in_stream)
def _filter_data_views(self, row):
i = 0
for specs in self._udf_data_view_specs:
for spec in specs:
row[i][spec.field_index] = None
i += 1
return row
class AvroCoderImpl(FieldCoderImpl):
def __init__(self, schema_string: str):
self._buffer_wrapper = FlinkAvroBufferWrapper()
self._schema = avro_schema.parse(schema_string)
self._decoder = FlinkAvroDecoder(self._buffer_wrapper)
self._encoder = FlinkAvroEncoder(self._buffer_wrapper)
self._reader = FlinkAvroDatumReader(writer_schema=self._schema, reader_schema=self._schema)
self._writer = FlinkAvroDatumWriter(writer_schema=self._schema)
def encode_to_stream(self, value, out_stream: OutputStream):
self._buffer_wrapper.switch_stream(out_stream)
self._writer.write(value, self._encoder)
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
# Since writer_schema equals reader_schema, in_stream does not need to support seek and tell
self._buffer_wrapper.switch_stream(in_stream)
return self._reader.read(self._decoder)
class LocalDateCoderImpl(FieldCoderImpl):
@staticmethod
def _encode_to_stream(value: datetime.date, out_stream: OutputStream):
if value is None:
out_stream.write_int32(0xFFFFFFFF)
out_stream.write_int16(0xFFFF)
else:
out_stream.write_int32(value.year)
out_stream.write_int8(value.month)
out_stream.write_int8(value.day)
@staticmethod
def _decode_from_stream(in_stream: InputStream):
year = in_stream.read_int32()
if year == 0xFFFFFFFF:
in_stream.read(2)
return None
month = in_stream.read_int8()
day = in_stream.read_int8()
return datetime.date(year, month, day)
def encode_to_stream(self, value: datetime.date, out_stream: OutputStream):
self._encode_to_stream(value, out_stream)
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
return self._decode_from_stream(in_stream)
class LocalTimeCoderImpl(FieldCoderImpl):
@staticmethod
def _encode_to_stream(value: datetime.time, out_stream: OutputStream):
if value is None:
out_stream.write_int8(0xFF)
out_stream.write_int16(0xFFFF)
out_stream.write_int32(0xFFFFFFFF)
else:
out_stream.write_int8(value.hour)
out_stream.write_int8(value.minute)
out_stream.write_int8(value.second)
out_stream.write_int32(value.microsecond * 1000)
@staticmethod
def _decode_from_stream(in_stream: InputStream):
hour = in_stream.read_int8()
if hour == 0xFF:
in_stream.read(6)
return None
minute = in_stream.read_int8()
second = in_stream.read_int8()
nano = in_stream.read_int32()
return datetime.time(hour, minute, second, nano // 1000)
def encode_to_stream(self, value: datetime.time, out_stream: OutputStream):
self._encode_to_stream(value, out_stream)
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
return self._decode_from_stream(in_stream)
class LocalDateTimeCoderImpl(FieldCoderImpl):
def encode_to_stream(self, value: datetime.datetime, out_stream: OutputStream):
if value is None:
LocalDateCoderImpl._encode_to_stream(None, out_stream)
LocalTimeCoderImpl._encode_to_stream(None, out_stream)
else:
LocalDateCoderImpl._encode_to_stream(value.date(), out_stream)
LocalTimeCoderImpl._encode_to_stream(value.time(), out_stream)
def decode_from_stream(self, in_stream: InputStream, length: int = 0):
date = LocalDateCoderImpl._decode_from_stream(in_stream)
time = LocalTimeCoderImpl._decode_from_stream(in_stream)
if date is None or time is None:
return None
return datetime.datetime(date.year, date.month, date.day, time.hour, time.minute,
time.second, time.microsecond)
| 33,406 | 34.463907 | 141 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/ResettableIO.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import io
class ResettableIO(io.RawIOBase):
"""
    Raw I/O implementation where the input and output streams are resettable.
"""
def set_input_bytes(self, b):
self._input_bytes = b
self._input_offset = 0
self._size = len(b)
def readinto(self, b):
"""
Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If no bytes are available, None is returned.
"""
output_buffer_len = len(b)
remaining = self._size - self._input_offset
if remaining >= output_buffer_len:
b[:] = self._input_bytes[self._input_offset:self._input_offset + output_buffer_len]
self._input_offset += output_buffer_len
return output_buffer_len
elif remaining > 0:
b[:remaining] = self._input_bytes[self._input_offset:self._input_offset + remaining]
self._input_offset = self._size
return remaining
else:
return None
def set_output_stream(self, output_stream):
self._output_stream = output_stream
def write(self, b):
"""
Write the given bytes or pyarrow.Buffer object *b* to the underlying
output stream and return the number of bytes written.
"""
if isinstance(b, bytes):
self._output_stream.write(b)
else:
# pyarrow.Buffer
self._output_stream.write(b.to_pybytes())
return len(b)
def seekable(self):
return False
def readable(self):
return self._size - self._input_offset
def writable(self):
return True
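# Example (an illustrative sketch, not part of the original module): the same ResettableIO
# instance is reused across Arrow batches by pointing it at new input bytes or at a different
# output stream instead of creating a new file object for every batch.
#
#     >>> resettable = ResettableIO()
#     >>> resettable.set_input_bytes(b"abcdef")
#     >>> buf = bytearray(4)
#     >>> resettable.readinto(buf)
#     4
#     >>> bytes(buf)
#     b'abcd'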
| 2,606 | 34.712329 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/coders.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
from abc import ABC, abstractmethod
from typing import Union
import pytz
from pyflink import fn_execution
if fn_execution.PYFLINK_CYTHON_ENABLED:
from pyflink.fn_execution import coder_impl_fast as coder_impl
else:
from pyflink.fn_execution import coder_impl_slow as coder_impl
from pyflink.datastream.formats.avro import GenericRecordAvroTypeInfo, AvroSchema
from pyflink.common.typeinfo import TypeInformation, BasicTypeInfo, BasicType, DateTypeInfo, \
TimeTypeInfo, TimestampTypeInfo, PrimitiveArrayTypeInfo, BasicArrayTypeInfo, TupleTypeInfo, \
MapTypeInfo, ListTypeInfo, RowTypeInfo, PickledBytesTypeInfo, ObjectArrayTypeInfo, \
ExternalTypeInfo
from pyflink.table.types import TinyIntType, SmallIntType, IntType, BigIntType, BooleanType, \
FloatType, DoubleType, VarCharType, VarBinaryType, DecimalType, DateType, TimeType, \
LocalZonedTimestampType, RowType, RowField, to_arrow_type, TimestampType, ArrayType, MapType, \
BinaryType, NullType, CharType
__all__ = ['FlattenRowCoder', 'RowCoder', 'BigIntCoder', 'TinyIntCoder', 'BooleanCoder',
'SmallIntCoder', 'IntCoder', 'FloatCoder', 'DoubleCoder', 'BinaryCoder', 'CharCoder',
'DateCoder', 'TimeCoder', 'TimestampCoder', 'LocalZonedTimestampCoder', 'InstantCoder',
'GenericArrayCoder', 'PrimitiveArrayCoder', 'MapCoder', 'DecimalCoder',
'BigDecimalCoder', 'TupleCoder', 'TimeWindowCoder', 'CountWindowCoder',
'PickleCoder', 'CloudPickleCoder', 'DataViewFilterCoder']
#########################################################################
# Top-level coder: ValueCoder & IterableCoder
#########################################################################
# LengthPrefixBaseCoder is the top level coder and the other coders will be used as the field coder
class LengthPrefixBaseCoder(ABC):
def __init__(self, field_coder: 'FieldCoder'):
self._field_coder = field_coder
@abstractmethod
def get_impl(self):
pass
@classmethod
def from_coder_info_descriptor_proto(cls, coder_info_descriptor_proto):
from pyflink.fn_execution import flink_fn_execution_pb2
field_coder = cls._to_field_coder(coder_info_descriptor_proto)
mode = coder_info_descriptor_proto.mode
separated_with_end_message = coder_info_descriptor_proto.separated_with_end_message
if mode == flink_fn_execution_pb2.CoderInfoDescriptor.SINGLE:
return ValueCoder(field_coder)
else:
return IterableCoder(field_coder, separated_with_end_message)
@classmethod
def _to_field_coder(cls, coder_info_descriptor_proto):
if coder_info_descriptor_proto.HasField('flatten_row_type'):
schema_proto = coder_info_descriptor_proto.flatten_row_type.schema
field_coders = [from_proto(f.type) for f in schema_proto.fields]
return FlattenRowCoder(field_coders)
elif coder_info_descriptor_proto.HasField('row_type'):
schema_proto = coder_info_descriptor_proto.row_type.schema
field_coders = [from_proto(f.type) for f in schema_proto.fields]
field_names = [f.name for f in schema_proto.fields]
return RowCoder(field_coders, field_names)
elif coder_info_descriptor_proto.HasField('arrow_type'):
timezone = pytz.timezone(os.environ['TABLE_LOCAL_TIME_ZONE'])
schema_proto = coder_info_descriptor_proto.arrow_type.schema
row_type = cls._to_row_type(schema_proto)
return ArrowCoder(cls._to_arrow_schema(row_type), row_type, timezone)
elif coder_info_descriptor_proto.HasField('over_window_arrow_type'):
timezone = pytz.timezone(os.environ['TABLE_LOCAL_TIME_ZONE'])
schema_proto = coder_info_descriptor_proto.over_window_arrow_type.schema
row_type = cls._to_row_type(schema_proto)
return OverWindowArrowCoder(
cls._to_arrow_schema(row_type), row_type, timezone)
elif coder_info_descriptor_proto.HasField('raw_type'):
type_info_proto = coder_info_descriptor_proto.raw_type.type_info
field_coder = from_type_info_proto(type_info_proto)
return field_coder
else:
raise ValueError("Unexpected coder type %s" % coder_info_descriptor_proto)
@classmethod
def _to_arrow_schema(cls, row_type):
import pyarrow as pa
return pa.schema([pa.field(n, to_arrow_type(t), t._nullable)
for n, t in zip(row_type.field_names(), row_type.field_types())])
@classmethod
def _to_data_type(cls, field_type):
from pyflink.fn_execution import flink_fn_execution_pb2
if field_type.type_name == flink_fn_execution_pb2.Schema.TINYINT:
return TinyIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.SMALLINT:
return SmallIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.INT:
return IntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BIGINT:
return BigIntType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BOOLEAN:
return BooleanType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.FLOAT:
return FloatType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DOUBLE:
return DoubleType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.CHAR:
return CharType(field_type.char_info.length, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.VARCHAR:
return VarCharType(field_type.var_char_info.length, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BINARY:
return BinaryType(field_type.binary_info.length, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.VARBINARY:
return VarBinaryType(field_type.var_binary_info.length, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DECIMAL:
return DecimalType(field_type.decimal_info.precision,
field_type.decimal_info.scale,
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.DATE:
return DateType(field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TIME:
return TimeType(field_type.time_info.precision, field_type.nullable)
elif field_type.type_name == \
flink_fn_execution_pb2.Schema.LOCAL_ZONED_TIMESTAMP:
return LocalZonedTimestampType(field_type.local_zoned_timestamp_info.precision,
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TIMESTAMP:
return TimestampType(field_type.timestamp_info.precision, field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.BASIC_ARRAY:
return ArrayType(cls._to_data_type(field_type.collection_element_type),
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TypeName.ROW:
return RowType(
[RowField(f.name, cls._to_data_type(f.type), f.description)
for f in field_type.row_schema.fields], field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TypeName.MAP:
return MapType(cls._to_data_type(field_type.map_info.key_type),
cls._to_data_type(field_type.map_info.value_type),
field_type.nullable)
elif field_type.type_name == flink_fn_execution_pb2.Schema.TypeName.NULL:
return NullType()
else:
raise ValueError("field_type %s is not supported." % field_type)
@classmethod
def _to_row_type(cls, row_schema):
return RowType([RowField(f.name, cls._to_data_type(f.type)) for f in row_schema.fields])
class IterableCoder(LengthPrefixBaseCoder):
"""
Coder for iterable data.
"""
def __init__(self, field_coder: 'FieldCoder', separated_with_end_message):
super(IterableCoder, self).__init__(field_coder)
self._separated_with_end_message = separated_with_end_message
def get_impl(self):
return coder_impl.IterableCoderImpl(self._field_coder.get_impl(),
self._separated_with_end_message)
class ValueCoder(LengthPrefixBaseCoder):
"""
Coder for single data.
"""
def __init__(self, field_coder: 'FieldCoder'):
super(ValueCoder, self).__init__(field_coder)
def get_impl(self):
return coder_impl.ValueCoderImpl(self._field_coder.get_impl())
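# Illustrative sketch (added for clarity, not part of the original module): the
# high-level coders above only wrap a low-level FieldCoder and delegate to the
# corresponding *Impl class. BigIntCoder is defined further below in this module.
def _example_value_coder():  # hypothetical helper, for illustration only
    wrapper = ValueCoder(BigIntCoder())
    return wrapper.get_impl()  # a coder_impl.ValueCoderImpl wrapping a BigIntCoderImpl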
#########################################################################
# Low-level coder: FieldCoder
#########################################################################
class FieldCoder(ABC):
def get_impl(self) -> coder_impl.FieldCoderImpl:
pass
def __eq__(self, other):
return type(self) == type(other)
class FlattenRowCoder(FieldCoder):
"""
Coder for Row. The decoded result will be flattened as a list of column values of a row instead
of a row object.
"""
def __init__(self, field_coders):
self._field_coders = field_coders
def get_impl(self):
return coder_impl.FlattenRowCoderImpl([c.get_impl() for c in self._field_coders])
def __repr__(self):
return 'FlattenRowCoder[%s]' % ', '.join(str(c) for c in self._field_coders)
    def __eq__(self, other: 'FlattenRowCoder'):
        return (self.__class__ == other.__class__
                and len(self._field_coders) == len(other._field_coders)
                and all(self._field_coders[i] == other._field_coders[i]
                        for i in range(len(self._field_coders))))
def __ne__(self, other):
return not self == other
    def __hash__(self):
        return hash(tuple(self._field_coders))
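# Illustrative sketch (added for clarity, not part of the original module): a
# FlattenRowCoder for a (BIGINT, VARCHAR) row; as the docstring above notes, its
# impl decodes to a flat list of column values rather than a Row object.
def _example_flatten_row_coder():  # hypothetical helper, for illustration only
    return FlattenRowCoder([BigIntCoder(), CharCoder()]).get_impl()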
class ArrowCoder(FieldCoder):
"""
Coder for Arrow.
"""
def __init__(self, schema, row_type, timezone):
self._schema = schema
self._row_type = row_type
self._timezone = timezone
def get_impl(self):
return coder_impl.ArrowCoderImpl(self._schema, self._row_type, self._timezone)
def __repr__(self):
return 'ArrowCoder[%s]' % self._schema
class OverWindowArrowCoder(FieldCoder):
"""
    Coder for batch pandas over-window aggregation.
"""
def __init__(self, schema, row_type, timezone):
self._arrow_coder = ArrowCoder(schema, row_type, timezone)
def get_impl(self):
return coder_impl.OverWindowArrowCoderImpl(self._arrow_coder.get_impl())
def __repr__(self):
return 'OverWindowArrowCoder[%s]' % self._arrow_coder
class RowCoder(FieldCoder):
"""
Coder for Row.
"""
def __init__(self, field_coders, field_names):
self._field_coders = field_coders
self._field_names = field_names
def get_impl(self):
return coder_impl.RowCoderImpl([c.get_impl() for c in self._field_coders],
self._field_names)
def __repr__(self):
return 'RowCoder[%s]' % ', '.join(str(c) for c in self._field_coders)
    def __eq__(self, other: 'RowCoder'):
        return (self.__class__ == other.__class__
                and self._field_names == other._field_names
                and all(self._field_coders[i] == other._field_coders[i]
                        for i in range(len(self._field_coders))))
def __ne__(self, other):
return not self == other
    def __hash__(self):
        return hash(tuple(self._field_coders))
class CollectionCoder(FieldCoder):
"""
    Base coder for collection types.
"""
def __init__(self, elem_coder):
self._elem_coder = elem_coder
def is_deterministic(self):
return self._elem_coder.is_deterministic()
def __eq__(self, other: 'CollectionCoder'):
return (self.__class__ == other.__class__
and self._elem_coder == other._elem_coder)
def __repr__(self):
return '%s[%s]' % (self.__class__.__name__, repr(self._elem_coder))
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._elem_coder)
class GenericArrayCoder(CollectionCoder):
"""
    Coder for generic arrays such as basic arrays or object arrays.
"""
def __init__(self, elem_coder):
super(GenericArrayCoder, self).__init__(elem_coder)
def get_impl(self):
return coder_impl.GenericArrayCoderImpl(self._elem_coder.get_impl())
class PrimitiveArrayCoder(CollectionCoder):
"""
Coder for Primitive Array.
"""
def __init__(self, elem_coder):
super(PrimitiveArrayCoder, self).__init__(elem_coder)
def get_impl(self):
return coder_impl.PrimitiveArrayCoderImpl(self._elem_coder.get_impl())
class MapCoder(FieldCoder):
"""
Coder for Map.
"""
def __init__(self, key_coder, value_coder):
self._key_coder = key_coder
self._value_coder = value_coder
def get_impl(self):
return coder_impl.MapCoderImpl(self._key_coder.get_impl(), self._value_coder.get_impl())
def is_deterministic(self):
return self._key_coder.is_deterministic() and self._value_coder.is_deterministic()
def __repr__(self):
return 'MapCoder[%s]' % ','.join([repr(self._key_coder), repr(self._value_coder)])
def __eq__(self, other: 'MapCoder'):
return (self.__class__ == other.__class__
and self._key_coder == other._key_coder
and self._value_coder == other._value_coder)
def __ne__(self, other):
return not self == other
def __hash__(self):
        return hash((self._key_coder, self._value_coder))
class BigIntCoder(FieldCoder):
"""
    Coder for 8-byte long values.
"""
def get_impl(self):
return coder_impl.BigIntCoderImpl()
class TinyIntCoder(FieldCoder):
"""
Coder for Byte.
"""
def get_impl(self):
return coder_impl.TinyIntCoderImpl()
class BooleanCoder(FieldCoder):
"""
Coder for Boolean.
"""
def get_impl(self):
return coder_impl.BooleanCoderImpl()
class SmallIntCoder(FieldCoder):
"""
Coder for Short.
"""
def get_impl(self):
return coder_impl.SmallIntCoderImpl()
class IntCoder(FieldCoder):
"""
    Coder for 4-byte int values.
"""
def get_impl(self):
return coder_impl.IntCoderImpl()
class FloatCoder(FieldCoder):
"""
Coder for Float.
"""
def get_impl(self):
return coder_impl.FloatCoderImpl()
class DoubleCoder(FieldCoder):
"""
Coder for Double.
"""
def get_impl(self):
return coder_impl.DoubleCoderImpl()
class DecimalCoder(FieldCoder):
"""
Coder for Decimal.
"""
def __init__(self, precision, scale):
self.precision = precision
self.scale = scale
def get_impl(self):
return coder_impl.DecimalCoderImpl(self.precision, self.scale)
def __eq__(self, other: 'DecimalCoder'):
return (self.__class__ == other.__class__ and
self.precision == other.precision and
self.scale == other.scale)
class BigDecimalCoder(FieldCoder):
"""
    Coder for a basic Decimal that does not need precision and scale to be specified.
"""
def get_impl(self):
return coder_impl.BigDecimalCoderImpl()
class BinaryCoder(FieldCoder):
"""
Coder for Byte Array.
"""
def get_impl(self):
return coder_impl.BinaryCoderImpl()
class CharCoder(FieldCoder):
"""
Coder for Character String.
"""
def get_impl(self):
return coder_impl.CharCoderImpl()
class DateCoder(FieldCoder):
"""
    Coder for Date.
"""
def get_impl(self):
return coder_impl.DateCoderImpl()
class TimeCoder(FieldCoder):
"""
Coder for Time.
"""
def get_impl(self):
return coder_impl.TimeCoderImpl()
class TimestampCoder(FieldCoder):
"""
Coder for Timestamp.
"""
def __init__(self, precision):
self.precision = precision
def get_impl(self):
return coder_impl.TimestampCoderImpl(self.precision)
def __eq__(self, other: 'TimestampCoder'):
return self.__class__ == other.__class__ and self.precision == other.precision
class LocalZonedTimestampCoder(FieldCoder):
"""
Coder for LocalZonedTimestamp.
"""
def __init__(self, precision, timezone):
self.precision = precision
self.timezone = timezone
def get_impl(self):
return coder_impl.LocalZonedTimestampCoderImpl(self.precision, self.timezone)
def __eq__(self, other: 'LocalZonedTimestampCoder'):
return (self.__class__ == other.__class__ and
self.precision == other.precision and
self.timezone == other.timezone)
class InstantCoder(FieldCoder):
"""
Coder for Instant.
"""
def get_impl(self) -> coder_impl.FieldCoderImpl:
return coder_impl.InstantCoderImpl()
class CloudPickleCoder(FieldCoder):
"""
    Coder that uses cloudpickle to encode Python objects.
"""
def get_impl(self):
return coder_impl.CloudPickleCoderImpl()
class PickleCoder(FieldCoder):
"""
    Coder that uses pickle to encode Python objects.
"""
def get_impl(self):
return coder_impl.PickleCoderImpl()
class TupleCoder(FieldCoder):
"""
Coder for Tuple.
"""
def __init__(self, field_coders):
self._field_coders = field_coders
def get_impl(self):
return coder_impl.TupleCoderImpl([c.get_impl() for c in self._field_coders])
def __repr__(self):
return 'TupleCoder[%s]' % ', '.join(str(c) for c in self._field_coders)
    def __eq__(self, other: 'TupleCoder'):
        return (self.__class__ == other.__class__ and
                len(self._field_coders) == len(other._field_coders) and
                all(self._field_coders[i] == other._field_coders[i]
                    for i in range(len(self._field_coders))))
class TimeWindowCoder(FieldCoder):
"""
Coder for TimeWindow.
"""
def get_impl(self):
return coder_impl.TimeWindowCoderImpl()
class CountWindowCoder(FieldCoder):
"""
Coder for CountWindow.
"""
def get_impl(self):
return coder_impl.CountWindowCoderImpl()
class GlobalWindowCoder(FieldCoder):
"""
Coder for GlobalWindow.
"""
def get_impl(self):
return coder_impl.GlobalWindowCoderImpl()
class DataViewFilterCoder(FieldCoder):
"""
Coder for data view filter.
"""
def __init__(self, udf_data_view_specs):
self._udf_data_view_specs = udf_data_view_specs
def get_impl(self):
return coder_impl.DataViewFilterCoderImpl(self._udf_data_view_specs)
class AvroCoder(FieldCoder):
def __init__(self, schema: Union[str, AvroSchema]):
if isinstance(schema, str):
self._schema_string = schema
elif isinstance(schema, AvroSchema):
self._schema_string = str(schema)
else:
raise ValueError('schema for AvroCoder must be string or AvroSchema')
def get_impl(self):
return coder_impl.AvroCoderImpl(self._schema_string)
class LocalDateCoder(FieldCoder):
def get_impl(self):
return coder_impl.LocalDateCoderImpl()
class LocalTimeCoder(FieldCoder):
def get_impl(self):
return coder_impl.LocalTimeCoderImpl()
class LocalDateTimeCoder(FieldCoder):
def get_impl(self):
return coder_impl.LocalDateTimeCoderImpl()
def from_proto(field_type):
"""
Creates the corresponding :class:`Coder` given the protocol representation of the field type.
:param field_type: the protocol representation of the field type
:return: :class:`Coder`
"""
from pyflink.fn_execution import flink_fn_execution_pb2
type_name = flink_fn_execution_pb2.Schema
_type_name_mappings = {
type_name.TINYINT: TinyIntCoder(),
type_name.SMALLINT: SmallIntCoder(),
type_name.INT: IntCoder(),
type_name.BIGINT: BigIntCoder(),
type_name.BOOLEAN: BooleanCoder(),
type_name.FLOAT: FloatCoder(),
type_name.DOUBLE: DoubleCoder(),
type_name.BINARY: BinaryCoder(),
type_name.VARBINARY: BinaryCoder(),
type_name.CHAR: CharCoder(),
type_name.VARCHAR: CharCoder(),
type_name.DATE: DateCoder(),
type_name.TIME: TimeCoder(),
}
field_type_name = field_type.type_name
coder = _type_name_mappings.get(field_type_name)
if coder is not None:
return coder
if field_type_name == type_name.ROW:
return RowCoder([from_proto(f.type) for f in field_type.row_schema.fields],
[f.name for f in field_type.row_schema.fields])
if field_type_name == type_name.TIMESTAMP:
return TimestampCoder(field_type.timestamp_info.precision)
if field_type_name == type_name.LOCAL_ZONED_TIMESTAMP:
timezone = pytz.timezone(os.environ['TABLE_LOCAL_TIME_ZONE'])
return LocalZonedTimestampCoder(field_type.local_zoned_timestamp_info.precision, timezone)
elif field_type_name == type_name.BASIC_ARRAY:
return GenericArrayCoder(from_proto(field_type.collection_element_type))
elif field_type_name == type_name.MAP:
return MapCoder(from_proto(field_type.map_info.key_type),
from_proto(field_type.map_info.value_type))
elif field_type_name == type_name.DECIMAL:
return DecimalCoder(field_type.decimal_info.precision,
field_type.decimal_info.scale)
else:
raise ValueError("field_type %s is not supported." % field_type)
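# Illustrative sketch (added for clarity, not part of the original module),
# assuming the generated flink_fn_execution_pb2 module is importable: a minimal
# BIGINT field type resolves to BigIntCoder via the mapping above.
def _example_from_proto():  # hypothetical helper, for illustration only
    from pyflink.fn_execution import flink_fn_execution_pb2
    field_type = flink_fn_execution_pb2.Schema.FieldType(
        type_name=flink_fn_execution_pb2.Schema.BIGINT, nullable=True)
    return from_proto(field_type)  # an instance of BigIntCoder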
def from_type_info_proto(type_info):
    # For DataStream type information.
from pyflink.fn_execution import flink_fn_execution_pb2
type_info_name = flink_fn_execution_pb2.TypeInfo
_type_info_name_mappings = {
type_info_name.STRING: CharCoder(),
type_info_name.BYTE: TinyIntCoder(),
type_info_name.BOOLEAN: BooleanCoder(),
type_info_name.SHORT: SmallIntCoder(),
type_info_name.INT: IntCoder(),
type_info_name.LONG: BigIntCoder(),
type_info_name.FLOAT: FloatCoder(),
type_info_name.DOUBLE: DoubleCoder(),
type_info_name.CHAR: CharCoder(),
type_info_name.BIG_INT: BigIntCoder(),
type_info_name.BIG_DEC: BigDecimalCoder(),
type_info_name.SQL_DATE: DateCoder(),
type_info_name.SQL_TIME: TimeCoder(),
type_info_name.SQL_TIMESTAMP: TimestampCoder(3),
type_info_name.PICKLED_BYTES: CloudPickleCoder(),
type_info_name.INSTANT: InstantCoder(),
type_info_name.LOCAL_DATE: LocalDateCoder(),
type_info_name.LOCAL_TIME: LocalTimeCoder(),
type_info_name.LOCAL_DATETIME: LocalDateTimeCoder(),
}
field_type_name = type_info.type_name
try:
return _type_info_name_mappings[field_type_name]
except KeyError:
if field_type_name == type_info_name.ROW:
return RowCoder(
[from_type_info_proto(f.field_type) for f in type_info.row_type_info.fields],
[f.field_name for f in type_info.row_type_info.fields])
elif field_type_name in (
type_info_name.PRIMITIVE_ARRAY,
type_info_name.LIST,
):
if type_info.collection_element_type.type_name == type_info_name.BYTE:
return BinaryCoder()
return PrimitiveArrayCoder(from_type_info_proto(type_info.collection_element_type))
elif field_type_name in (
type_info_name.BASIC_ARRAY,
type_info_name.OBJECT_ARRAY,
):
return GenericArrayCoder(from_type_info_proto(type_info.collection_element_type))
elif field_type_name == type_info_name.TUPLE:
return TupleCoder([from_type_info_proto(field_type)
for field_type in type_info.tuple_type_info.field_types])
elif field_type_name == type_info_name.MAP:
return MapCoder(from_type_info_proto(type_info.map_type_info.key_type),
from_type_info_proto(type_info.map_type_info.value_type))
elif field_type_name == type_info_name.AVRO:
return AvroCoder(type_info.avro_type_info.schema)
elif field_type_name == type_info_name.LOCAL_ZONED_TIMESTAMP:
return LocalZonedTimestampCoder(
3, timezone=pytz.timezone(os.environ['TABLE_LOCAL_TIME_ZONE'])
)
else:
raise ValueError("Unsupported type_info %s." % type_info)
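# Illustrative sketch (added for clarity, not part of the original module),
# assuming the generated flink_fn_execution_pb2 module is importable: a LONG
# type info resolves to the BigIntCoder registered in the mapping above.
def _example_from_type_info_proto():  # hypothetical helper, for illustration only
    from pyflink.fn_execution import flink_fn_execution_pb2
    type_info = flink_fn_execution_pb2.TypeInfo(
        type_name=flink_fn_execution_pb2.TypeInfo.LONG)
    return from_type_info_proto(type_info)  # an instance of BigIntCoder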
_basic_type_info_mappings = {
BasicType.BYTE: TinyIntCoder(),
BasicType.BOOLEAN: BooleanCoder(),
BasicType.SHORT: SmallIntCoder(),
BasicType.INT: IntCoder(),
BasicType.LONG: BigIntCoder(),
BasicType.BIG_INT: BigIntCoder(),
BasicType.FLOAT: FloatCoder(),
BasicType.DOUBLE: DoubleCoder(),
BasicType.STRING: CharCoder(),
BasicType.CHAR: CharCoder(),
BasicType.BIG_DEC: BigDecimalCoder(),
BasicType.INSTANT: InstantCoder()
}
def from_type_info(type_info: TypeInformation) -> FieldCoder:
"""
    Maps a TypeInformation to the corresponding FieldCoder.
"""
if isinstance(type_info, PickledBytesTypeInfo):
return PickleCoder()
elif isinstance(type_info, BasicTypeInfo):
return _basic_type_info_mappings[type_info._basic_type]
elif isinstance(type_info, DateTypeInfo):
return DateCoder()
elif isinstance(type_info, TimeTypeInfo):
return TimeCoder()
elif isinstance(type_info, TimestampTypeInfo):
return TimestampCoder(3)
elif isinstance(type_info, PrimitiveArrayTypeInfo):
element_type = type_info._element_type
if isinstance(element_type, BasicTypeInfo) and element_type._basic_type == BasicType.BYTE:
return BinaryCoder()
else:
return PrimitiveArrayCoder(from_type_info(element_type))
elif isinstance(type_info, (BasicArrayTypeInfo, ObjectArrayTypeInfo)):
return GenericArrayCoder(from_type_info(type_info._element_type))
elif isinstance(type_info, ListTypeInfo):
return GenericArrayCoder(from_type_info(type_info.elem_type))
elif isinstance(type_info, MapTypeInfo):
return MapCoder(
from_type_info(type_info._key_type_info), from_type_info(type_info._value_type_info))
elif isinstance(type_info, TupleTypeInfo):
return TupleCoder([from_type_info(field_type)
for field_type in type_info.get_field_types()])
elif isinstance(type_info, RowTypeInfo):
return RowCoder(
[from_type_info(f) for f in type_info.get_field_types()],
[f for f in type_info.get_field_names()])
elif isinstance(type_info, ExternalTypeInfo):
return from_type_info(type_info._type_info)
elif isinstance(type_info, GenericRecordAvroTypeInfo):
return AvroCoder(type_info._schema)
else:
raise ValueError("Unsupported type_info %s." % type_info)
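# Illustrative sketch (added for clarity, not part of the original module),
# assuming pyflink.common.Types is available: a ROW(INT, STRING) type information
# maps to a RowCoder over an IntCoder and a CharCoder, per the branches above.
def _example_from_type_info():  # hypothetical helper, for illustration only
    from pyflink.common import Types
    return from_type_info(Types.ROW([Types.INT(), Types.STRING()]))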
| 28,439 | 33.430993 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
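# Note (added for clarity, not in the original): bool() of a non-empty string is
# always True, so values such as 'false' or '0' still enable the fast path below;
# only an empty value yields False.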
if 'PYFLINK_CYTHON_ENABLED' in os.environ:
PYFLINK_CYTHON_ENABLED = bool(os.environ['PYFLINK_CYTHON_ENABLED'])
else:
PYFLINK_CYTHON_ENABLED = True
# Supported combinations:
# 1) PyFlink Fast + Beam Fast
# 2) PyFlink Slow + Beam Slow
# 3) PyFlink Slow + Beam Fast
# Check whether Beam can be fast and force PyFlink to be slow if Beam is slow
try:
from apache_beam.coders import stream # noqa # pylint: disable=unused-import
except:
PYFLINK_CYTHON_ENABLED = False
# Check whether PyFlink could be fast
try:
from pyflink.fn_execution import stream_fast, coder_impl_fast \
# noqa # pylint: disable=unused-import
from pyflink.fn_execution.beam import \
beam_operations_fast, beam_coder_impl_fast, beam_stream_fast \
# noqa # pylint: disable=unused-import
from pyflink.fn_execution.table import window_aggregate_fast, aggregate_fast \
# noqa # pylint: disable=unused-import
except:
PYFLINK_CYTHON_ENABLED = False
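# Illustrative usage (added for clarity, not part of the original module):
# downstream code can import the resolved flag to check whether the Cython
# ("fast") execution paths are active, e.g.
#
#     from pyflink.fn_execution import PYFLINK_CYTHON_ENABLED
#     print(PYFLINK_CYTHON_ENABLED)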
| 1,947 | 38.755102 | 82 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/flink_fn_execution_pb2.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: flink-fn-execution.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18\x66link-fn-execution.proto\x12 org.apache.flink.fn_execution.v1\"*\n\x0cJobParameter\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t\"\x86\x01\n\x05Input\x12\x44\n\x03udf\x18\x01 \x01(\x0b\x32\x35.org.apache.flink.fn_execution.v1.UserDefinedFunctionH\x00\x12\x15\n\x0binputOffset\x18\x02 \x01(\x05H\x00\x12\x17\n\rinputConstant\x18\x03 \x01(\x0cH\x00\x42\x07\n\x05input\"\xa8\x01\n\x13UserDefinedFunction\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\x37\n\x06inputs\x18\x02 \x03(\x0b\x32\'.org.apache.flink.fn_execution.v1.Input\x12\x14\n\x0cwindow_index\x18\x03 \x01(\x05\x12\x1a\n\x12takes_row_as_input\x18\x04 \x01(\x08\x12\x15\n\ris_pandas_udf\x18\x05 \x01(\x08\"\x93\x02\n\x14UserDefinedFunctions\x12\x43\n\x04udfs\x18\x01 \x03(\x0b\x32\x35.org.apache.flink.fn_execution.v1.UserDefinedFunction\x12\x16\n\x0emetric_enabled\x18\x02 \x01(\x08\x12=\n\x07windows\x18\x03 \x03(\x0b\x32,.org.apache.flink.fn_execution.v1.OverWindow\x12\x17\n\x0fprofile_enabled\x18\x04 \x01(\x08\x12\x46\n\x0ejob_parameters\x18\x05 \x03(\x0b\x32..org.apache.flink.fn_execution.v1.JobParameter\"\xdd\x02\n\nOverWindow\x12L\n\x0bwindow_type\x18\x01 \x01(\x0e\x32\x37.org.apache.flink.fn_execution.v1.OverWindow.WindowType\x12\x16\n\x0elower_boundary\x18\x02 \x01(\x03\x12\x16\n\x0eupper_boundary\x18\x03 \x01(\x03\"\xd0\x01\n\nWindowType\x12\x13\n\x0fRANGE_UNBOUNDED\x10\x00\x12\x1d\n\x19RANGE_UNBOUNDED_PRECEDING\x10\x01\x12\x1d\n\x19RANGE_UNBOUNDED_FOLLOWING\x10\x02\x12\x11\n\rRANGE_SLIDING\x10\x03\x12\x11\n\rROW_UNBOUNDED\x10\x04\x12\x1b\n\x17ROW_UNBOUNDED_PRECEDING\x10\x05\x12\x1b\n\x17ROW_UNBOUNDED_FOLLOWING\x10\x06\x12\x0f\n\x0bROW_SLIDING\x10\x07\"\x8b\x06\n\x1cUserDefinedAggregateFunction\x12\x0f\n\x07payload\x18\x01 \x01(\x0c\x12\x37\n\x06inputs\x18\x02 \x03(\x0b\x32\'.org.apache.flink.fn_execution.v1.Input\x12Z\n\x05specs\x18\x03 \x03(\x0b\x32K.org.apache.flink.fn_execution.v1.UserDefinedAggregateFunction.DataViewSpec\x12\x12\n\nfilter_arg\x18\x04 \x01(\x05\x12\x10\n\x08\x64istinct\x18\x05 \x01(\x08\x12\x1a\n\x12takes_row_as_input\x18\x06 \x01(\x08\x1a\x82\x04\n\x0c\x44\x61taViewSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x66ield_index\x18\x02 \x01(\x05\x12i\n\tlist_view\x18\x03 \x01(\x0b\x32T.org.apache.flink.fn_execution.v1.UserDefinedAggregateFunction.DataViewSpec.ListViewH\x00\x12g\n\x08map_view\x18\x04 \x01(\x0b\x32S.org.apache.flink.fn_execution.v1.UserDefinedAggregateFunction.DataViewSpec.MapViewH\x00\x1aT\n\x08ListView\x12H\n\x0c\x65lement_type\x18\x01 \x01(\x0b\x32\x32.org.apache.flink.fn_execution.v1.Schema.FieldType\x1a\x97\x01\n\x07MapView\x12\x44\n\x08key_type\x18\x01 \x01(\x0b\x32\x32.org.apache.flink.fn_execution.v1.Schema.FieldType\x12\x46\n\nvalue_type\x18\x02 \x01(\x0b\x32\x32.org.apache.flink.fn_execution.v1.Schema.FieldTypeB\x0b\n\tdata_view\"\xac\x04\n\x0bGroupWindow\x12M\n\x0bwindow_type\x18\x01 \x01(\x0e\x32\x38.org.apache.flink.fn_execution.v1.GroupWindow.WindowType\x12\x16\n\x0eis_time_window\x18\x02 \x01(\x08\x12\x14\n\x0cwindow_slide\x18\x03 \x01(\x03\x12\x13\n\x0bwindow_size\x18\x04 \x01(\x03\x12\x12\n\nwindow_gap\x18\x05 \x01(\x03\x12\x13\n\x0bis_row_time\x18\x06 \x01(\x08\x12\x18\n\x10time_field_index\x18\x07 \x01(\x05\x12\x17\n\x0f\x61llowedLateness\x18\x08 \x01(\x03\x12U\n\x0fnamedProperties\x18\t \x03(\x0e\x32<.org.apache.flink.fn_execution.v1.GroupWindow.WindowProperty\x12\x16\n\x0eshift_timezone\x18\n 
\x01(\t\"[\n\nWindowType\x12\x19\n\x15TUMBLING_GROUP_WINDOW\x10\x00\x12\x18\n\x14SLIDING_GROUP_WINDOW\x10\x01\x12\x18\n\x14SESSION_GROUP_WINDOW\x10\x02\"c\n\x0eWindowProperty\x12\x10\n\x0cWINDOW_START\x10\x00\x12\x0e\n\nWINDOW_END\x10\x01\x12\x16\n\x12ROW_TIME_ATTRIBUTE\x10\x02\x12\x17\n\x13PROC_TIME_ATTRIBUTE\x10\x03\"\xde\x04\n\x1dUserDefinedAggregateFunctions\x12L\n\x04udfs\x18\x01 \x03(\x0b\x32>.org.apache.flink.fn_execution.v1.UserDefinedAggregateFunction\x12\x16\n\x0emetric_enabled\x18\x02 \x01(\x08\x12\x10\n\x08grouping\x18\x03 \x03(\x05\x12\x1e\n\x16generate_update_before\x18\x04 \x01(\x08\x12\x44\n\x08key_type\x18\x05 \x01(\x0b\x32\x32.org.apache.flink.fn_execution.v1.Schema.FieldType\x12\x1b\n\x13index_of_count_star\x18\x06 \x01(\x05\x12\x1e\n\x16state_cleaning_enabled\x18\x07 \x01(\x08\x12\x18\n\x10state_cache_size\x18\x08 \x01(\x05\x12!\n\x19map_state_read_cache_size\x18\t \x01(\x05\x12\"\n\x1amap_state_write_cache_size\x18\n \x01(\x05\x12\x1b\n\x13\x63ount_star_inserted\x18\x0b \x01(\x08\x12\x43\n\x0cgroup_window\x18\x0c \x01(\x0b\x32-.org.apache.flink.fn_execution.v1.GroupWindow\x12\x17\n\x0fprofile_enabled\x18\r \x01(\x08\x12\x46\n\x0ejob_parameters\x18\x0e \x03(\x0b\x32..org.apache.flink.fn_execution.v1.JobParameter\"\xf6\x0f\n\x06Schema\x12>\n\x06\x66ields\x18\x01 \x03(\x0b\x32..org.apache.flink.fn_execution.v1.Schema.Field\x1a\x97\x01\n\x07MapInfo\x12\x44\n\x08key_type\x18\x01 \x01(\x0b\x32\x32.org.apache.flink.fn_execution.v1.Schema.FieldType\x12\x46\n\nvalue_type\x18\x02 \x01(\x0b\x32\x32.org.apache.flink.fn_execution.v1.Schema.FieldType\x1a\x1d\n\x08TimeInfo\x12\x11\n\tprecision\x18\x01 \x01(\x05\x1a\"\n\rTimestampInfo\x12\x11\n\tprecision\x18\x01 \x01(\x05\x1a,\n\x17LocalZonedTimestampInfo\x12\x11\n\tprecision\x18\x01 \x01(\x05\x1a\'\n\x12ZonedTimestampInfo\x12\x11\n\tprecision\x18\x01 \x01(\x05\x1a/\n\x0b\x44\x65\x63imalInfo\x12\x11\n\tprecision\x18\x01 \x01(\x05\x12\r\n\x05scale\x18\x02 \x01(\x05\x1a\x1c\n\nBinaryInfo\x12\x0e\n\x06length\x18\x01 \x01(\x05\x1a\x1f\n\rVarBinaryInfo\x12\x0e\n\x06length\x18\x01 \x01(\x05\x1a\x1a\n\x08\x43harInfo\x12\x0e\n\x06length\x18\x01 \x01(\x05\x1a\x1d\n\x0bVarCharInfo\x12\x0e\n\x06length\x18\x01 \x01(\x05\x1a\xb0\x08\n\tFieldType\x12\x44\n\ttype_name\x18\x01 \x01(\x0e\x32\x31.org.apache.flink.fn_execution.v1.Schema.TypeName\x12\x10\n\x08nullable\x18\x02 \x01(\x08\x12U\n\x17\x63ollection_element_type\x18\x03 \x01(\x0b\x32\x32.org.apache.flink.fn_execution.v1.Schema.FieldTypeH\x00\x12\x44\n\x08map_info\x18\x04 \x01(\x0b\x32\x30.org.apache.flink.fn_execution.v1.Schema.MapInfoH\x00\x12>\n\nrow_schema\x18\x05 \x01(\x0b\x32(.org.apache.flink.fn_execution.v1.SchemaH\x00\x12L\n\x0c\x64\x65\x63imal_info\x18\x06 \x01(\x0b\x32\x34.org.apache.flink.fn_execution.v1.Schema.DecimalInfoH\x00\x12\x46\n\ttime_info\x18\x07 \x01(\x0b\x32\x31.org.apache.flink.fn_execution.v1.Schema.TimeInfoH\x00\x12P\n\x0etimestamp_info\x18\x08 \x01(\x0b\x32\x36.org.apache.flink.fn_execution.v1.Schema.TimestampInfoH\x00\x12\x66\n\x1alocal_zoned_timestamp_info\x18\t \x01(\x0b\[email protected]_execution.v1.Schema.LocalZonedTimestampInfoH\x00\x12[\n\x14zoned_timestamp_info\x18\n \x01(\x0b\x32;.org.apache.flink.fn_execution.v1.Schema.ZonedTimestampInfoH\x00\x12J\n\x0b\x62inary_info\x18\x0b \x01(\x0b\x32\x33.org.apache.flink.fn_execution.v1.Schema.BinaryInfoH\x00\x12Q\n\x0fvar_binary_info\x18\x0c \x01(\x0b\x32\x36.org.apache.flink.fn_execution.v1.Schema.VarBinaryInfoH\x00\x12\x46\n\tchar_info\x18\r 
\x01(\x0b\x32\x31.org.apache.flink.fn_execution.v1.Schema.CharInfoH\x00\x12M\n\rvar_char_info\x18\x0e \x01(\x0b\x32\x34.org.apache.flink.fn_execution.v1.Schema.VarCharInfoH\x00\x42\x0b\n\ttype_info\x1al\n\x05\x46ield\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12@\n\x04type\x18\x03 \x01(\x0b\x32\x32.org.apache.flink.fn_execution.v1.Schema.FieldType\"\xab\x02\n\x08TypeName\x12\x07\n\x03ROW\x10\x00\x12\x0b\n\x07TINYINT\x10\x01\x12\x0c\n\x08SMALLINT\x10\x02\x12\x07\n\x03INT\x10\x03\x12\n\n\x06\x42IGINT\x10\x04\x12\x0b\n\x07\x44\x45\x43IMAL\x10\x05\x12\t\n\x05\x46LOAT\x10\x06\x12\n\n\x06\x44OUBLE\x10\x07\x12\x08\n\x04\x44\x41TE\x10\x08\x12\x08\n\x04TIME\x10\t\x12\r\n\tTIMESTAMP\x10\n\x12\x0b\n\x07\x42OOLEAN\x10\x0b\x12\n\n\x06\x42INARY\x10\x0c\x12\r\n\tVARBINARY\x10\r\x12\x08\n\x04\x43HAR\x10\x0e\x12\x0b\n\x07VARCHAR\x10\x0f\x12\x0f\n\x0b\x42\x41SIC_ARRAY\x10\x10\x12\x07\n\x03MAP\x10\x11\x12\x0c\n\x08MULTISET\x10\x12\x12\x19\n\x15LOCAL_ZONED_TIMESTAMP\x10\x13\x12\x13\n\x0fZONED_TIMESTAMP\x10\x14\x12\x08\n\x04NULL\x10\x15\"\xc3\n\n\x08TypeInfo\x12\x46\n\ttype_name\x18\x01 \x01(\x0e\x32\x33.org.apache.flink.fn_execution.v1.TypeInfo.TypeName\x12M\n\x17\x63ollection_element_type\x18\x02 \x01(\x0b\x32*.org.apache.flink.fn_execution.v1.TypeInfoH\x00\x12O\n\rrow_type_info\x18\x03 \x01(\x0b\x32\x36.org.apache.flink.fn_execution.v1.TypeInfo.RowTypeInfoH\x00\x12S\n\x0ftuple_type_info\x18\x04 \x01(\x0b\x32\x38.org.apache.flink.fn_execution.v1.TypeInfo.TupleTypeInfoH\x00\x12O\n\rmap_type_info\x18\x05 \x01(\x0b\x32\x36.org.apache.flink.fn_execution.v1.TypeInfo.MapTypeInfoH\x00\x12Q\n\x0e\x61vro_type_info\x18\x06 \x01(\x0b\x32\x37.org.apache.flink.fn_execution.v1.TypeInfo.AvroTypeInfoH\x00\x1a\x8b\x01\n\x0bMapTypeInfo\x12<\n\x08key_type\x18\x01 \x01(\x0b\x32*.org.apache.flink.fn_execution.v1.TypeInfo\x12>\n\nvalue_type\x18\x02 \x01(\x0b\x32*.org.apache.flink.fn_execution.v1.TypeInfo\x1a\xb8\x01\n\x0bRowTypeInfo\x12L\n\x06\x66ields\x18\x01 \x03(\x0b\x32<.org.apache.flink.fn_execution.v1.TypeInfo.RowTypeInfo.Field\x1a[\n\x05\x46ield\x12\x12\n\nfield_name\x18\x01 \x01(\t\x12>\n\nfield_type\x18\x02 \x01(\x0b\x32*.org.apache.flink.fn_execution.v1.TypeInfo\x1aP\n\rTupleTypeInfo\x12?\n\x0b\x66ield_types\x18\x01 \x03(\x0b\x32*.org.apache.flink.fn_execution.v1.TypeInfo\x1a\x1e\n\x0c\x41vroTypeInfo\x12\x0e\n\x06schema\x18\x01 \x01(\t\"\x8d\x03\n\x08TypeName\x12\x07\n\x03ROW\x10\x00\x12\n\n\x06STRING\x10\x01\x12\x08\n\x04\x42YTE\x10\x02\x12\x0b\n\x07\x42OOLEAN\x10\x03\x12\t\n\x05SHORT\x10\x04\x12\x07\n\x03INT\x10\x05\x12\x08\n\x04LONG\x10\x06\x12\t\n\x05\x46LOAT\x10\x07\x12\n\n\x06\x44OUBLE\x10\x08\x12\x08\n\x04\x43HAR\x10\t\x12\x0b\n\x07\x42IG_INT\x10\n\x12\x0b\n\x07\x42IG_DEC\x10\x0b\x12\x0c\n\x08SQL_DATE\x10\x0c\x12\x0c\n\x08SQL_TIME\x10\r\x12\x11\n\rSQL_TIMESTAMP\x10\x0e\x12\x0f\n\x0b\x42\x41SIC_ARRAY\x10\x0f\x12\x13\n\x0fPRIMITIVE_ARRAY\x10\x10\x12\t\n\x05TUPLE\x10\x11\x12\x08\n\x04LIST\x10\x12\x12\x07\n\x03MAP\x10\x13\x12\x11\n\rPICKLED_BYTES\x10\x14\x12\x10\n\x0cOBJECT_ARRAY\x10\x15\x12\x0b\n\x07INSTANT\x10\x16\x12\x08\n\x04\x41VRO\x10\x17\x12\x0e\n\nLOCAL_DATE\x10\x18\x12\x0e\n\nLOCAL_TIME\x10\x19\x12\x12\n\x0eLOCAL_DATETIME\x10\x1a\x12\x19\n\x15LOCAL_ZONED_TIMESTAMP\x10\x1b\x42\x0b\n\ttype_info\"\xd1\x07\n\x1dUserDefinedDataStreamFunction\x12\x63\n\rfunction_type\x18\x01 \x01(\x0e\x32L.org.apache.flink.fn_execution.v1.UserDefinedDataStreamFunction.FunctionType\x12g\n\x0fruntime_context\x18\x02 
\x01(\x0b\x32N.org.apache.flink.fn_execution.v1.UserDefinedDataStreamFunction.RuntimeContext\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\x12\x16\n\x0emetric_enabled\x18\x04 \x01(\x08\x12\x41\n\rkey_type_info\x18\x05 \x01(\x0b\x32*.org.apache.flink.fn_execution.v1.TypeInfo\x12\x17\n\x0fprofile_enabled\x18\x06 \x01(\x08\x12\x17\n\x0fhas_side_output\x18\x07 \x01(\x08\x12\x18\n\x10state_cache_size\x18\x08 \x01(\x05\x12!\n\x19map_state_read_cache_size\x18\t \x01(\x05\x12\"\n\x1amap_state_write_cache_size\x18\n \x01(\x05\x1a\xb2\x02\n\x0eRuntimeContext\x12\x11\n\ttask_name\x18\x01 \x01(\t\x12\x1f\n\x17task_name_with_subtasks\x18\x02 \x01(\t\x12#\n\x1bnumber_of_parallel_subtasks\x18\x03 \x01(\x05\x12\'\n\x1fmax_number_of_parallel_subtasks\x18\x04 \x01(\x05\x12\x1d\n\x15index_of_this_subtask\x18\x05 \x01(\x05\x12\x16\n\x0e\x61ttempt_number\x18\x06 \x01(\x05\x12\x46\n\x0ejob_parameters\x18\x07 \x03(\x0b\x32..org.apache.flink.fn_execution.v1.JobParameter\x12\x1f\n\x17in_batch_execution_mode\x18\x08 \x01(\x08\"\xad\x01\n\x0c\x46unctionType\x12\x0b\n\x07PROCESS\x10\x00\x12\x0e\n\nCO_PROCESS\x10\x01\x12\x11\n\rKEYED_PROCESS\x10\x02\x12\x14\n\x10KEYED_CO_PROCESS\x10\x03\x12\n\n\x06WINDOW\x10\x04\x12\x18\n\x14\x43O_BROADCAST_PROCESS\x10\x05\x12\x1e\n\x1aKEYED_CO_BROADCAST_PROCESS\x10\x06\x12\x11\n\rREVISE_OUTPUT\x10\x64\"\xe4\x0e\n\x0fStateDescriptor\x12\x12\n\nstate_name\x18\x01 \x01(\t\x12Z\n\x10state_ttl_config\x18\x02 \x01(\x0b\[email protected]_execution.v1.StateDescriptor.StateTTLConfig\x1a\xe0\r\n\x0eStateTTLConfig\x12`\n\x0bupdate_type\x18\x01 \x01(\x0e\x32K.org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.UpdateType\x12j\n\x10state_visibility\x18\x02 \x01(\x0e\x32P.org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.StateVisibility\x12w\n\x17ttl_time_characteristic\x18\x03 \x01(\x0e\x32V.org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.TtlTimeCharacteristic\x12\x0b\n\x03ttl\x18\x04 \x01(\x03\x12n\n\x12\x63leanup_strategies\x18\x05 \x01(\x0b\x32R.org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies\x1a\xca\x08\n\x11\x43leanupStrategies\x12 \n\x18is_cleanup_in_background\x18\x01 \x01(\x08\x12y\n\nstrategies\x18\x02 \x03(\x0b\x32\x65.org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies.MapStrategiesEntry\x1aX\n\x1aIncrementalCleanupStrategy\x12\x14\n\x0c\x63leanup_size\x18\x01 \x01(\x05\x12$\n\x1crun_cleanup_for_every_record\x18\x02 \x01(\x08\x1aK\n#RocksdbCompactFilterCleanupStrategy\x12$\n\x1cquery_time_after_num_entries\x18\x01 \x01(\x03\x1a\xe0\x04\n\x12MapStrategiesEntry\x12o\n\x08strategy\x18\x01 \x01(\x0e\x32].org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies.Strategies\x12\x81\x01\n\x0e\x65mpty_strategy\x18\x02 \x01(\x0e\x32g.org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies.EmptyCleanupStrategyH\x00\x12\x95\x01\n\x1cincremental_cleanup_strategy\x18\x03 \x01(\x0b\x32m.org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies.IncrementalCleanupStrategyH\x00\x12\xa9\x01\n\'rocksdb_compact_filter_cleanup_strategy\x18\x04 
\x01(\x0b\x32v.org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies.RocksdbCompactFilterCleanupStrategyH\x00\x42\x11\n\x0f\x43leanupStrategy\"b\n\nStrategies\x12\x1c\n\x18\x46ULL_STATE_SCAN_SNAPSHOT\x10\x00\x12\x17\n\x13INCREMENTAL_CLEANUP\x10\x01\x12\x1d\n\x19ROCKSDB_COMPACTION_FILTER\x10\x02\"*\n\x14\x45mptyCleanupStrategy\x12\x12\n\x0e\x45MPTY_STRATEGY\x10\x00\"D\n\nUpdateType\x12\x0c\n\x08\x44isabled\x10\x00\x12\x14\n\x10OnCreateAndWrite\x10\x01\x12\x12\n\x0eOnReadAndWrite\x10\x02\"J\n\x0fStateVisibility\x12\x1f\n\x1bReturnExpiredIfNotCleanedUp\x10\x00\x12\x16\n\x12NeverReturnExpired\x10\x01\"+\n\x15TtlTimeCharacteristic\x12\x12\n\x0eProcessingTime\x10\x00\"\xf1\x07\n\x13\x43oderInfoDescriptor\x12`\n\x10\x66latten_row_type\x18\x01 \x01(\x0b\x32\x44.org.apache.flink.fn_execution.v1.CoderInfoDescriptor.FlattenRowTypeH\x00\x12Q\n\x08row_type\x18\x02 \x01(\x0b\x32=.org.apache.flink.fn_execution.v1.CoderInfoDescriptor.RowTypeH\x00\x12U\n\narrow_type\x18\x03 \x01(\x0b\x32?.org.apache.flink.fn_execution.v1.CoderInfoDescriptor.ArrowTypeH\x00\x12k\n\x16over_window_arrow_type\x18\x04 \x01(\x0b\x32I.org.apache.flink.fn_execution.v1.CoderInfoDescriptor.OverWindowArrowTypeH\x00\x12Q\n\x08raw_type\x18\x05 \x01(\x0b\x32=.org.apache.flink.fn_execution.v1.CoderInfoDescriptor.RawTypeH\x00\x12H\n\x04mode\x18\x06 \x01(\x0e\x32:.org.apache.flink.fn_execution.v1.CoderInfoDescriptor.Mode\x12\"\n\x1aseparated_with_end_message\x18\x07 \x01(\x08\x1aJ\n\x0e\x46lattenRowType\x12\x38\n\x06schema\x18\x01 \x01(\x0b\x32(.org.apache.flink.fn_execution.v1.Schema\x1a\x43\n\x07RowType\x12\x38\n\x06schema\x18\x01 \x01(\x0b\x32(.org.apache.flink.fn_execution.v1.Schema\x1a\x45\n\tArrowType\x12\x38\n\x06schema\x18\x01 \x01(\x0b\x32(.org.apache.flink.fn_execution.v1.Schema\x1aO\n\x13OverWindowArrowType\x12\x38\n\x06schema\x18\x01 \x01(\x0b\x32(.org.apache.flink.fn_execution.v1.Schema\x1aH\n\x07RawType\x12=\n\ttype_info\x18\x01 \x01(\x0b\x32*.org.apache.flink.fn_execution.v1.TypeInfo\" \n\x04Mode\x12\n\n\x06SINGLE\x10\x00\x12\x0c\n\x08MULTIPLE\x10\x01\x42\x0b\n\tdata_typeB-\n\x1forg.apache.flink.fnexecution.v1B\nFlinkFnApib\x06proto3')
_JOBPARAMETER = DESCRIPTOR.message_types_by_name['JobParameter']
_INPUT = DESCRIPTOR.message_types_by_name['Input']
_USERDEFINEDFUNCTION = DESCRIPTOR.message_types_by_name['UserDefinedFunction']
_USERDEFINEDFUNCTIONS = DESCRIPTOR.message_types_by_name['UserDefinedFunctions']
_OVERWINDOW = DESCRIPTOR.message_types_by_name['OverWindow']
_USERDEFINEDAGGREGATEFUNCTION = DESCRIPTOR.message_types_by_name['UserDefinedAggregateFunction']
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC = _USERDEFINEDAGGREGATEFUNCTION.nested_types_by_name['DataViewSpec']
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC_LISTVIEW = _USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC.nested_types_by_name['ListView']
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC_MAPVIEW = _USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC.nested_types_by_name['MapView']
_GROUPWINDOW = DESCRIPTOR.message_types_by_name['GroupWindow']
_USERDEFINEDAGGREGATEFUNCTIONS = DESCRIPTOR.message_types_by_name['UserDefinedAggregateFunctions']
_SCHEMA = DESCRIPTOR.message_types_by_name['Schema']
_SCHEMA_MAPINFO = _SCHEMA.nested_types_by_name['MapInfo']
_SCHEMA_TIMEINFO = _SCHEMA.nested_types_by_name['TimeInfo']
_SCHEMA_TIMESTAMPINFO = _SCHEMA.nested_types_by_name['TimestampInfo']
_SCHEMA_LOCALZONEDTIMESTAMPINFO = _SCHEMA.nested_types_by_name['LocalZonedTimestampInfo']
_SCHEMA_ZONEDTIMESTAMPINFO = _SCHEMA.nested_types_by_name['ZonedTimestampInfo']
_SCHEMA_DECIMALINFO = _SCHEMA.nested_types_by_name['DecimalInfo']
_SCHEMA_BINARYINFO = _SCHEMA.nested_types_by_name['BinaryInfo']
_SCHEMA_VARBINARYINFO = _SCHEMA.nested_types_by_name['VarBinaryInfo']
_SCHEMA_CHARINFO = _SCHEMA.nested_types_by_name['CharInfo']
_SCHEMA_VARCHARINFO = _SCHEMA.nested_types_by_name['VarCharInfo']
_SCHEMA_FIELDTYPE = _SCHEMA.nested_types_by_name['FieldType']
_SCHEMA_FIELD = _SCHEMA.nested_types_by_name['Field']
_TYPEINFO = DESCRIPTOR.message_types_by_name['TypeInfo']
_TYPEINFO_MAPTYPEINFO = _TYPEINFO.nested_types_by_name['MapTypeInfo']
_TYPEINFO_ROWTYPEINFO = _TYPEINFO.nested_types_by_name['RowTypeInfo']
_TYPEINFO_ROWTYPEINFO_FIELD = _TYPEINFO_ROWTYPEINFO.nested_types_by_name['Field']
_TYPEINFO_TUPLETYPEINFO = _TYPEINFO.nested_types_by_name['TupleTypeInfo']
_TYPEINFO_AVROTYPEINFO = _TYPEINFO.nested_types_by_name['AvroTypeInfo']
_USERDEFINEDDATASTREAMFUNCTION = DESCRIPTOR.message_types_by_name['UserDefinedDataStreamFunction']
_USERDEFINEDDATASTREAMFUNCTION_RUNTIMECONTEXT = _USERDEFINEDDATASTREAMFUNCTION.nested_types_by_name['RuntimeContext']
_STATEDESCRIPTOR = DESCRIPTOR.message_types_by_name['StateDescriptor']
_STATEDESCRIPTOR_STATETTLCONFIG = _STATEDESCRIPTOR.nested_types_by_name['StateTTLConfig']
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES = _STATEDESCRIPTOR_STATETTLCONFIG.nested_types_by_name['CleanupStrategies']
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_INCREMENTALCLEANUPSTRATEGY = _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES.nested_types_by_name['IncrementalCleanupStrategy']
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_ROCKSDBCOMPACTFILTERCLEANUPSTRATEGY = _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES.nested_types_by_name['RocksdbCompactFilterCleanupStrategy']
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_MAPSTRATEGIESENTRY = _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES.nested_types_by_name['MapStrategiesEntry']
_CODERINFODESCRIPTOR = DESCRIPTOR.message_types_by_name['CoderInfoDescriptor']
_CODERINFODESCRIPTOR_FLATTENROWTYPE = _CODERINFODESCRIPTOR.nested_types_by_name['FlattenRowType']
_CODERINFODESCRIPTOR_ROWTYPE = _CODERINFODESCRIPTOR.nested_types_by_name['RowType']
_CODERINFODESCRIPTOR_ARROWTYPE = _CODERINFODESCRIPTOR.nested_types_by_name['ArrowType']
_CODERINFODESCRIPTOR_OVERWINDOWARROWTYPE = _CODERINFODESCRIPTOR.nested_types_by_name['OverWindowArrowType']
_CODERINFODESCRIPTOR_RAWTYPE = _CODERINFODESCRIPTOR.nested_types_by_name['RawType']
_OVERWINDOW_WINDOWTYPE = _OVERWINDOW.enum_types_by_name['WindowType']
_GROUPWINDOW_WINDOWTYPE = _GROUPWINDOW.enum_types_by_name['WindowType']
_GROUPWINDOW_WINDOWPROPERTY = _GROUPWINDOW.enum_types_by_name['WindowProperty']
_SCHEMA_TYPENAME = _SCHEMA.enum_types_by_name['TypeName']
_TYPEINFO_TYPENAME = _TYPEINFO.enum_types_by_name['TypeName']
_USERDEFINEDDATASTREAMFUNCTION_FUNCTIONTYPE = _USERDEFINEDDATASTREAMFUNCTION.enum_types_by_name['FunctionType']
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_STRATEGIES = _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES.enum_types_by_name['Strategies']
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_EMPTYCLEANUPSTRATEGY = _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES.enum_types_by_name['EmptyCleanupStrategy']
_STATEDESCRIPTOR_STATETTLCONFIG_UPDATETYPE = _STATEDESCRIPTOR_STATETTLCONFIG.enum_types_by_name['UpdateType']
_STATEDESCRIPTOR_STATETTLCONFIG_STATEVISIBILITY = _STATEDESCRIPTOR_STATETTLCONFIG.enum_types_by_name['StateVisibility']
_STATEDESCRIPTOR_STATETTLCONFIG_TTLTIMECHARACTERISTIC = _STATEDESCRIPTOR_STATETTLCONFIG.enum_types_by_name['TtlTimeCharacteristic']
_CODERINFODESCRIPTOR_MODE = _CODERINFODESCRIPTOR.enum_types_by_name['Mode']
JobParameter = _reflection.GeneratedProtocolMessageType('JobParameter', (_message.Message,), {
'DESCRIPTOR' : _JOBPARAMETER,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.JobParameter)
})
_sym_db.RegisterMessage(JobParameter)
Input = _reflection.GeneratedProtocolMessageType('Input', (_message.Message,), {
'DESCRIPTOR' : _INPUT,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Input)
})
_sym_db.RegisterMessage(Input)
UserDefinedFunction = _reflection.GeneratedProtocolMessageType('UserDefinedFunction', (_message.Message,), {
'DESCRIPTOR' : _USERDEFINEDFUNCTION,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedFunction)
})
_sym_db.RegisterMessage(UserDefinedFunction)
UserDefinedFunctions = _reflection.GeneratedProtocolMessageType('UserDefinedFunctions', (_message.Message,), {
'DESCRIPTOR' : _USERDEFINEDFUNCTIONS,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedFunctions)
})
_sym_db.RegisterMessage(UserDefinedFunctions)
OverWindow = _reflection.GeneratedProtocolMessageType('OverWindow', (_message.Message,), {
'DESCRIPTOR' : _OVERWINDOW,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.OverWindow)
})
_sym_db.RegisterMessage(OverWindow)
UserDefinedAggregateFunction = _reflection.GeneratedProtocolMessageType('UserDefinedAggregateFunction', (_message.Message,), {
'DataViewSpec' : _reflection.GeneratedProtocolMessageType('DataViewSpec', (_message.Message,), {
'ListView' : _reflection.GeneratedProtocolMessageType('ListView', (_message.Message,), {
'DESCRIPTOR' : _USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC_LISTVIEW,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedAggregateFunction.DataViewSpec.ListView)
})
,
'MapView' : _reflection.GeneratedProtocolMessageType('MapView', (_message.Message,), {
'DESCRIPTOR' : _USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC_MAPVIEW,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedAggregateFunction.DataViewSpec.MapView)
})
,
'DESCRIPTOR' : _USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedAggregateFunction.DataViewSpec)
})
,
'DESCRIPTOR' : _USERDEFINEDAGGREGATEFUNCTION,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedAggregateFunction)
})
_sym_db.RegisterMessage(UserDefinedAggregateFunction)
_sym_db.RegisterMessage(UserDefinedAggregateFunction.DataViewSpec)
_sym_db.RegisterMessage(UserDefinedAggregateFunction.DataViewSpec.ListView)
_sym_db.RegisterMessage(UserDefinedAggregateFunction.DataViewSpec.MapView)
GroupWindow = _reflection.GeneratedProtocolMessageType('GroupWindow', (_message.Message,), {
'DESCRIPTOR' : _GROUPWINDOW,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.GroupWindow)
})
_sym_db.RegisterMessage(GroupWindow)
UserDefinedAggregateFunctions = _reflection.GeneratedProtocolMessageType('UserDefinedAggregateFunctions', (_message.Message,), {
'DESCRIPTOR' : _USERDEFINEDAGGREGATEFUNCTIONS,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedAggregateFunctions)
})
_sym_db.RegisterMessage(UserDefinedAggregateFunctions)
Schema = _reflection.GeneratedProtocolMessageType('Schema', (_message.Message,), {
'MapInfo' : _reflection.GeneratedProtocolMessageType('MapInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_MAPINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.MapInfo)
})
,
'TimeInfo' : _reflection.GeneratedProtocolMessageType('TimeInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_TIMEINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.TimeInfo)
})
,
'TimestampInfo' : _reflection.GeneratedProtocolMessageType('TimestampInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_TIMESTAMPINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.TimestampInfo)
})
,
'LocalZonedTimestampInfo' : _reflection.GeneratedProtocolMessageType('LocalZonedTimestampInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_LOCALZONEDTIMESTAMPINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.LocalZonedTimestampInfo)
})
,
'ZonedTimestampInfo' : _reflection.GeneratedProtocolMessageType('ZonedTimestampInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_ZONEDTIMESTAMPINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.ZonedTimestampInfo)
})
,
'DecimalInfo' : _reflection.GeneratedProtocolMessageType('DecimalInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_DECIMALINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.DecimalInfo)
})
,
'BinaryInfo' : _reflection.GeneratedProtocolMessageType('BinaryInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_BINARYINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.BinaryInfo)
})
,
'VarBinaryInfo' : _reflection.GeneratedProtocolMessageType('VarBinaryInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_VARBINARYINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.VarBinaryInfo)
})
,
'CharInfo' : _reflection.GeneratedProtocolMessageType('CharInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_CHARINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.CharInfo)
})
,
'VarCharInfo' : _reflection.GeneratedProtocolMessageType('VarCharInfo', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_VARCHARINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.VarCharInfo)
})
,
'FieldType' : _reflection.GeneratedProtocolMessageType('FieldType', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_FIELDTYPE,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.FieldType)
})
,
'Field' : _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), {
'DESCRIPTOR' : _SCHEMA_FIELD,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema.Field)
})
,
'DESCRIPTOR' : _SCHEMA,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.Schema)
})
_sym_db.RegisterMessage(Schema)
_sym_db.RegisterMessage(Schema.MapInfo)
_sym_db.RegisterMessage(Schema.TimeInfo)
_sym_db.RegisterMessage(Schema.TimestampInfo)
_sym_db.RegisterMessage(Schema.LocalZonedTimestampInfo)
_sym_db.RegisterMessage(Schema.ZonedTimestampInfo)
_sym_db.RegisterMessage(Schema.DecimalInfo)
_sym_db.RegisterMessage(Schema.BinaryInfo)
_sym_db.RegisterMessage(Schema.VarBinaryInfo)
_sym_db.RegisterMessage(Schema.CharInfo)
_sym_db.RegisterMessage(Schema.VarCharInfo)
_sym_db.RegisterMessage(Schema.FieldType)
_sym_db.RegisterMessage(Schema.Field)
TypeInfo = _reflection.GeneratedProtocolMessageType('TypeInfo', (_message.Message,), {
'MapTypeInfo' : _reflection.GeneratedProtocolMessageType('MapTypeInfo', (_message.Message,), {
'DESCRIPTOR' : _TYPEINFO_MAPTYPEINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.TypeInfo.MapTypeInfo)
})
,
'RowTypeInfo' : _reflection.GeneratedProtocolMessageType('RowTypeInfo', (_message.Message,), {
'Field' : _reflection.GeneratedProtocolMessageType('Field', (_message.Message,), {
'DESCRIPTOR' : _TYPEINFO_ROWTYPEINFO_FIELD,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.TypeInfo.RowTypeInfo.Field)
})
,
'DESCRIPTOR' : _TYPEINFO_ROWTYPEINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.TypeInfo.RowTypeInfo)
})
,
'TupleTypeInfo' : _reflection.GeneratedProtocolMessageType('TupleTypeInfo', (_message.Message,), {
'DESCRIPTOR' : _TYPEINFO_TUPLETYPEINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.TypeInfo.TupleTypeInfo)
})
,
'AvroTypeInfo' : _reflection.GeneratedProtocolMessageType('AvroTypeInfo', (_message.Message,), {
'DESCRIPTOR' : _TYPEINFO_AVROTYPEINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.TypeInfo.AvroTypeInfo)
})
,
'DESCRIPTOR' : _TYPEINFO,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.TypeInfo)
})
_sym_db.RegisterMessage(TypeInfo)
_sym_db.RegisterMessage(TypeInfo.MapTypeInfo)
_sym_db.RegisterMessage(TypeInfo.RowTypeInfo)
_sym_db.RegisterMessage(TypeInfo.RowTypeInfo.Field)
_sym_db.RegisterMessage(TypeInfo.TupleTypeInfo)
_sym_db.RegisterMessage(TypeInfo.AvroTypeInfo)
UserDefinedDataStreamFunction = _reflection.GeneratedProtocolMessageType('UserDefinedDataStreamFunction', (_message.Message,), {
'RuntimeContext' : _reflection.GeneratedProtocolMessageType('RuntimeContext', (_message.Message,), {
'DESCRIPTOR' : _USERDEFINEDDATASTREAMFUNCTION_RUNTIMECONTEXT,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedDataStreamFunction.RuntimeContext)
})
,
'DESCRIPTOR' : _USERDEFINEDDATASTREAMFUNCTION,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.UserDefinedDataStreamFunction)
})
_sym_db.RegisterMessage(UserDefinedDataStreamFunction)
_sym_db.RegisterMessage(UserDefinedDataStreamFunction.RuntimeContext)
StateDescriptor = _reflection.GeneratedProtocolMessageType('StateDescriptor', (_message.Message,), {
'StateTTLConfig' : _reflection.GeneratedProtocolMessageType('StateTTLConfig', (_message.Message,), {
'CleanupStrategies' : _reflection.GeneratedProtocolMessageType('CleanupStrategies', (_message.Message,), {
'IncrementalCleanupStrategy' : _reflection.GeneratedProtocolMessageType('IncrementalCleanupStrategy', (_message.Message,), {
'DESCRIPTOR' : _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_INCREMENTALCLEANUPSTRATEGY,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies.IncrementalCleanupStrategy)
})
,
'RocksdbCompactFilterCleanupStrategy' : _reflection.GeneratedProtocolMessageType('RocksdbCompactFilterCleanupStrategy', (_message.Message,), {
'DESCRIPTOR' : _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_ROCKSDBCOMPACTFILTERCLEANUPSTRATEGY,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies.RocksdbCompactFilterCleanupStrategy)
})
,
'MapStrategiesEntry' : _reflection.GeneratedProtocolMessageType('MapStrategiesEntry', (_message.Message,), {
'DESCRIPTOR' : _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_MAPSTRATEGIESENTRY,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies.MapStrategiesEntry)
})
,
'DESCRIPTOR' : _STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig.CleanupStrategies)
})
,
'DESCRIPTOR' : _STATEDESCRIPTOR_STATETTLCONFIG,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.StateDescriptor.StateTTLConfig)
})
,
'DESCRIPTOR' : _STATEDESCRIPTOR,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.StateDescriptor)
})
_sym_db.RegisterMessage(StateDescriptor)
_sym_db.RegisterMessage(StateDescriptor.StateTTLConfig)
_sym_db.RegisterMessage(StateDescriptor.StateTTLConfig.CleanupStrategies)
_sym_db.RegisterMessage(StateDescriptor.StateTTLConfig.CleanupStrategies.IncrementalCleanupStrategy)
_sym_db.RegisterMessage(StateDescriptor.StateTTLConfig.CleanupStrategies.RocksdbCompactFilterCleanupStrategy)
_sym_db.RegisterMessage(StateDescriptor.StateTTLConfig.CleanupStrategies.MapStrategiesEntry)
CoderInfoDescriptor = _reflection.GeneratedProtocolMessageType('CoderInfoDescriptor', (_message.Message,), {
'FlattenRowType' : _reflection.GeneratedProtocolMessageType('FlattenRowType', (_message.Message,), {
'DESCRIPTOR' : _CODERINFODESCRIPTOR_FLATTENROWTYPE,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.CoderInfoDescriptor.FlattenRowType)
})
,
'RowType' : _reflection.GeneratedProtocolMessageType('RowType', (_message.Message,), {
'DESCRIPTOR' : _CODERINFODESCRIPTOR_ROWTYPE,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.CoderInfoDescriptor.RowType)
})
,
'ArrowType' : _reflection.GeneratedProtocolMessageType('ArrowType', (_message.Message,), {
'DESCRIPTOR' : _CODERINFODESCRIPTOR_ARROWTYPE,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.CoderInfoDescriptor.ArrowType)
})
,
'OverWindowArrowType' : _reflection.GeneratedProtocolMessageType('OverWindowArrowType', (_message.Message,), {
'DESCRIPTOR' : _CODERINFODESCRIPTOR_OVERWINDOWARROWTYPE,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.CoderInfoDescriptor.OverWindowArrowType)
})
,
'RawType' : _reflection.GeneratedProtocolMessageType('RawType', (_message.Message,), {
'DESCRIPTOR' : _CODERINFODESCRIPTOR_RAWTYPE,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.CoderInfoDescriptor.RawType)
})
,
'DESCRIPTOR' : _CODERINFODESCRIPTOR,
'__module__' : 'flink_fn_execution_pb2'
# @@protoc_insertion_point(class_scope:org.apache.flink.fn_execution.v1.CoderInfoDescriptor)
})
_sym_db.RegisterMessage(CoderInfoDescriptor)
_sym_db.RegisterMessage(CoderInfoDescriptor.FlattenRowType)
_sym_db.RegisterMessage(CoderInfoDescriptor.RowType)
_sym_db.RegisterMessage(CoderInfoDescriptor.ArrowType)
_sym_db.RegisterMessage(CoderInfoDescriptor.OverWindowArrowType)
_sym_db.RegisterMessage(CoderInfoDescriptor.RawType)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
DESCRIPTOR._serialized_options = b'\n\037org.apache.flink.fnexecution.v1B\nFlinkFnApi'
_JOBPARAMETER._serialized_start=62
_JOBPARAMETER._serialized_end=104
_INPUT._serialized_start=107
_INPUT._serialized_end=241
_USERDEFINEDFUNCTION._serialized_start=244
_USERDEFINEDFUNCTION._serialized_end=412
_USERDEFINEDFUNCTIONS._serialized_start=415
_USERDEFINEDFUNCTIONS._serialized_end=690
_OVERWINDOW._serialized_start=693
_OVERWINDOW._serialized_end=1042
_OVERWINDOW_WINDOWTYPE._serialized_start=834
_OVERWINDOW_WINDOWTYPE._serialized_end=1042
_USERDEFINEDAGGREGATEFUNCTION._serialized_start=1045
_USERDEFINEDAGGREGATEFUNCTION._serialized_end=1824
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC._serialized_start=1310
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC._serialized_end=1824
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC_LISTVIEW._serialized_start=1573
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC_LISTVIEW._serialized_end=1657
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC_MAPVIEW._serialized_start=1660
_USERDEFINEDAGGREGATEFUNCTION_DATAVIEWSPEC_MAPVIEW._serialized_end=1811
_GROUPWINDOW._serialized_start=1827
_GROUPWINDOW._serialized_end=2383
_GROUPWINDOW_WINDOWTYPE._serialized_start=2191
_GROUPWINDOW_WINDOWTYPE._serialized_end=2282
_GROUPWINDOW_WINDOWPROPERTY._serialized_start=2284
_GROUPWINDOW_WINDOWPROPERTY._serialized_end=2383
_USERDEFINEDAGGREGATEFUNCTIONS._serialized_start=2386
_USERDEFINEDAGGREGATEFUNCTIONS._serialized_end=2992
_SCHEMA._serialized_start=2995
_SCHEMA._serialized_end=5033
_SCHEMA_MAPINFO._serialized_start=3070
_SCHEMA_MAPINFO._serialized_end=3221
_SCHEMA_TIMEINFO._serialized_start=3223
_SCHEMA_TIMEINFO._serialized_end=3252
_SCHEMA_TIMESTAMPINFO._serialized_start=3254
_SCHEMA_TIMESTAMPINFO._serialized_end=3288
_SCHEMA_LOCALZONEDTIMESTAMPINFO._serialized_start=3290
_SCHEMA_LOCALZONEDTIMESTAMPINFO._serialized_end=3334
_SCHEMA_ZONEDTIMESTAMPINFO._serialized_start=3336
_SCHEMA_ZONEDTIMESTAMPINFO._serialized_end=3375
_SCHEMA_DECIMALINFO._serialized_start=3377
_SCHEMA_DECIMALINFO._serialized_end=3424
_SCHEMA_BINARYINFO._serialized_start=3426
_SCHEMA_BINARYINFO._serialized_end=3454
_SCHEMA_VARBINARYINFO._serialized_start=3456
_SCHEMA_VARBINARYINFO._serialized_end=3487
_SCHEMA_CHARINFO._serialized_start=3489
_SCHEMA_CHARINFO._serialized_end=3515
_SCHEMA_VARCHARINFO._serialized_start=3517
_SCHEMA_VARCHARINFO._serialized_end=3546
_SCHEMA_FIELDTYPE._serialized_start=3549
_SCHEMA_FIELDTYPE._serialized_end=4621
_SCHEMA_FIELD._serialized_start=4623
_SCHEMA_FIELD._serialized_end=4731
_SCHEMA_TYPENAME._serialized_start=4734
_SCHEMA_TYPENAME._serialized_end=5033
_TYPEINFO._serialized_start=5036
_TYPEINFO._serialized_end=6383
_TYPEINFO_MAPTYPEINFO._serialized_start=5530
_TYPEINFO_MAPTYPEINFO._serialized_end=5669
_TYPEINFO_ROWTYPEINFO._serialized_start=5672
_TYPEINFO_ROWTYPEINFO._serialized_end=5856
_TYPEINFO_ROWTYPEINFO_FIELD._serialized_start=5765
_TYPEINFO_ROWTYPEINFO_FIELD._serialized_end=5856
_TYPEINFO_TUPLETYPEINFO._serialized_start=5858
_TYPEINFO_TUPLETYPEINFO._serialized_end=5938
_TYPEINFO_AVROTYPEINFO._serialized_start=5940
_TYPEINFO_AVROTYPEINFO._serialized_end=5970
_TYPEINFO_TYPENAME._serialized_start=5973
_TYPEINFO_TYPENAME._serialized_end=6370
_USERDEFINEDDATASTREAMFUNCTION._serialized_start=6386
_USERDEFINEDDATASTREAMFUNCTION._serialized_end=7363
_USERDEFINEDDATASTREAMFUNCTION_RUNTIMECONTEXT._serialized_start=6881
_USERDEFINEDDATASTREAMFUNCTION_RUNTIMECONTEXT._serialized_end=7187
_USERDEFINEDDATASTREAMFUNCTION_FUNCTIONTYPE._serialized_start=7190
_USERDEFINEDDATASTREAMFUNCTION_FUNCTIONTYPE._serialized_end=7363
_STATEDESCRIPTOR._serialized_start=7366
_STATEDESCRIPTOR._serialized_end=9258
_STATEDESCRIPTOR_STATETTLCONFIG._serialized_start=7498
_STATEDESCRIPTOR_STATETTLCONFIG._serialized_end=9258
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES._serialized_start=7969
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES._serialized_end=9067
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_INCREMENTALCLEANUPSTRATEGY._serialized_start=8147
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_INCREMENTALCLEANUPSTRATEGY._serialized_end=8235
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_ROCKSDBCOMPACTFILTERCLEANUPSTRATEGY._serialized_start=8237
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_ROCKSDBCOMPACTFILTERCLEANUPSTRATEGY._serialized_end=8312
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_MAPSTRATEGIESENTRY._serialized_start=8315
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_MAPSTRATEGIESENTRY._serialized_end=8923
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_STRATEGIES._serialized_start=8925
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_STRATEGIES._serialized_end=9023
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_EMPTYCLEANUPSTRATEGY._serialized_start=9025
_STATEDESCRIPTOR_STATETTLCONFIG_CLEANUPSTRATEGIES_EMPTYCLEANUPSTRATEGY._serialized_end=9067
_STATEDESCRIPTOR_STATETTLCONFIG_UPDATETYPE._serialized_start=9069
_STATEDESCRIPTOR_STATETTLCONFIG_UPDATETYPE._serialized_end=9137
_STATEDESCRIPTOR_STATETTLCONFIG_STATEVISIBILITY._serialized_start=9139
_STATEDESCRIPTOR_STATETTLCONFIG_STATEVISIBILITY._serialized_end=9213
_STATEDESCRIPTOR_STATETTLCONFIG_TTLTIMECHARACTERISTIC._serialized_start=9215
_STATEDESCRIPTOR_STATETTLCONFIG_TTLTIMECHARACTERISTIC._serialized_end=9258
_CODERINFODESCRIPTOR._serialized_start=9261
_CODERINFODESCRIPTOR._serialized_end=10270
_CODERINFODESCRIPTOR_FLATTENROWTYPE._serialized_start=9854
_CODERINFODESCRIPTOR_FLATTENROWTYPE._serialized_end=9928
_CODERINFODESCRIPTOR_ROWTYPE._serialized_start=9930
_CODERINFODESCRIPTOR_ROWTYPE._serialized_end=9997
_CODERINFODESCRIPTOR_ARROWTYPE._serialized_start=9999
_CODERINFODESCRIPTOR_ARROWTYPE._serialized_end=10068
_CODERINFODESCRIPTOR_OVERWINDOWARROWTYPE._serialized_start=10070
_CODERINFODESCRIPTOR_OVERWINDOWARROWTYPE._serialized_end=10149
_CODERINFODESCRIPTOR_RAWTYPE._serialized_start=10151
_CODERINFODESCRIPTOR_RAWTYPE._serialized_end=10223
_CODERINFODESCRIPTOR_MODE._serialized_start=10225
_CODERINFODESCRIPTOR_MODE._serialized_end=10257
# @@protoc_insertion_point(module_scope)
| 44,590 | 80.074545 | 15,742 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/profiler.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import cProfile
import pstats
class Profiler(object):
def __init__(self):
self._pr = cProfile.Profile()
def start(self):
self._pr.enable()
def close(self):
self._pr.disable()
ps = pstats.Stats(self._pr).sort_stats('cumulative')
ps.print_stats()
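# Illustrative sketch (not part of the original module): typical usage of the
# Profiler wrapper -- start() enables the underlying cProfile.Profile, close()
# disables it and prints the stats sorted by cumulative time. The profiled
# function below is a hypothetical placeholder.
if __name__ == '__main__':
    def _some_work():
        return sum(i * i for i in range(100000))
    profiler = Profiler()
    profiler.start()
    _some_work()
    profiler.close()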
| 1,260 | 36.088235 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/stream_slow.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import struct
class InputStream(object):
"""
A pure Python implementation of InputStream.
"""
def __init__(self, data):
self.data = data
self.pos = 0
def read(self, size):
self.pos += size
return self.data[self.pos - size: self.pos]
def read_byte(self):
self.pos += 1
return self.data[self.pos - 1]
def read_int8(self):
return struct.unpack('b', self.read(1))[0]
def read_int16(self):
return struct.unpack('>h', self.read(2))[0]
def read_int32(self):
return struct.unpack('>i', self.read(4))[0]
def read_int64(self):
return struct.unpack('>q', self.read(8))[0]
def read_float(self):
return struct.unpack('>f', self.read(4))[0]
def read_double(self):
return struct.unpack('>d', self.read(8))[0]
def read_bytes(self):
size = self.read_int32()
return self.read(size)
def read_var_int64(self):
shift = 0
result = 0
while True:
byte = self.read_byte()
if byte < 0:
raise RuntimeError('VarLong not terminated.')
bits = byte & 0x7F
if shift >= 64 or (shift >= 63 and bits > 1):
raise RuntimeError('VarLong too long.')
result |= bits << shift
shift += 7
if not byte & 0x80:
break
if result >= 1 << 63:
result -= 1 << 64
return result
def size(self):
return len(self.data) - self.pos
class OutputStream(object):
"""
A pure Python implementation of OutputStream.
"""
def __init__(self):
self.data = []
self.byte_count = 0
def write(self, b: bytes):
self.data.append(b)
self.byte_count += len(b)
def write_byte(self, v):
self.data.append(chr(v).encode('latin-1'))
self.byte_count += 1
def write_int8(self, v: int):
self.write(struct.pack('b', v))
def write_int16(self, v: int):
self.write(struct.pack('>h', v))
def write_int32(self, v: int):
self.write(struct.pack('>i', v))
def write_int64(self, v: int):
self.write(struct.pack('>q', v))
def write_float(self, v: float):
self.write(struct.pack('>f', v))
def write_double(self, v: float):
self.write(struct.pack('>d', v))
def write_bytes(self, v: bytes, size: int):
self.write_int32(size)
self.write(v[:size])
def write_var_int64(self, v: int):
if v < 0:
v += 1 << 64
if v <= 0:
raise ValueError('Value too large (negative).')
while True:
bits = v & 0x7F
v >>= 7
if v:
bits |= 0x80
self.data.append(chr(bits).encode('latin-1'))
self.byte_count += 1
if not v:
break
def get(self) -> bytes:
return b''.join(self.data)
def size(self) -> int:
return self.byte_count
def clear(self):
self.data.clear()
self.byte_count = 0
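# Illustrative sketch (not part of the original module): the var-int64 codec
# above round-trips between OutputStream and InputStream -- 7 payload bits per
# byte, 0x80 as the continuation flag, and negative values wrapped into the
# unsigned 64-bit range.
if __name__ == '__main__':
    _out = OutputStream()
    _out.write_var_int64(300)   # encoded as two bytes: 0xAC 0x02
    _out.write_var_int64(-1)    # wraps to 2**64 - 1 and takes ten bytes
    _in = InputStream(_out.get())
    assert _in.read_var_int64() == 300
    assert _in.read_var_int64() == -1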
| 4,081 | 26.958904 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/formats/avro.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import struct
from avro.io import (
AvroTypeException,
BinaryDecoder,
BinaryEncoder,
DatumReader,
DatumWriter,
SchemaResolutionException,
Validate,
)
STRUCT_FLOAT = struct.Struct('>f') # big-endian float
STRUCT_DOUBLE = struct.Struct('>d') # big-endian double
STRUCT_INT = struct.Struct('>i') # big-endian int
STRUCT_LONG_LONG = struct.Struct('>q') # big-endian long long
class FlinkAvroBufferWrapper(object):
def __init__(self):
self._stream = None
def switch_stream(self, stream):
self._stream = stream
def read(self, n=1):
return self._stream.read(n)
def write(self, data):
return self._stream.write(data)
class FlinkAvroDecoder(BinaryDecoder):
"""
    Flink-customized basic type decoder with some different encodings. For example, Avro by
    default encodes int and long as variable-sized bytes, while Flink uses fixed-size bytes in
    some places.
"""
def __init__(self, reader):
super().__init__(reader)
def read_int(self):
return STRUCT_INT.unpack(self.read(4))[0]
def read_long(self):
return STRUCT_LONG_LONG.unpack(self.read(8))[0]
def read_var_long(self):
"""
        Flink's implementation of variable-sized long serialization does not move the sign to the
        lowest bit and flip the remaining bits, i.e. it does not use Avro's zig-zag encoding.
"""
b = ord(self.read(1))
n = b & 0x7F
shift = 7
while (b & 0x80) != 0:
b = ord(self.read(1))
n |= (b & 0x7F) << shift
shift += 7
return n
def read_float(self):
return STRUCT_FLOAT.unpack(self.read(4))[0]
def read_double(self):
return STRUCT_DOUBLE.unpack(self.read(8))[0]
def read_bytes(self):
nbytes = self.read_int()
assert (nbytes >= 0), nbytes
return self.read(nbytes)
def skip_int(self):
self.skip(4)
def skip_long(self):
self.skip(8)
def skip_bytes(self):
nbytes = self.read_int()
assert (nbytes >= 0), nbytes
self.skip(nbytes)
class FlinkAvroDatumReader(DatumReader):
"""
    Flink-customized datum reader for parsing composite data structures, matching the changed
    basic type serialization in FlinkAvroDecoder.
"""
def __init__(self, writer_schema=None, reader_schema=None):
super().__init__(writer_schema, reader_schema)
def read_array(self, writer_schema, reader_schema, decoder: 'FlinkAvroDecoder'):
read_items = []
block_count = decoder.read_var_long()
assert block_count >= 0
if block_count == 0:
return read_items
for i in range(block_count):
read_items.append(self.read_data(writer_schema.items,
reader_schema.items, decoder))
decoder.read_var_long()
return read_items
def skip_array(self, writer_schema, decoder: 'FlinkAvroDecoder'):
block_count = decoder.read_var_long()
assert block_count >= 0
if block_count == 0:
return
for i in range(block_count):
self.skip_data(writer_schema.items, decoder)
decoder.read_var_long()
def read_map(self, writer_schema, reader_schema, decoder: 'FlinkAvroDecoder'):
read_items = {}
block_count = decoder.read_var_long()
assert block_count >= 0
if block_count == 0:
return read_items
for i in range(block_count):
key = decoder.read_utf8()
read_items[key] = self.read_data(writer_schema.values,
reader_schema.values, decoder)
decoder.read_var_long()
return read_items
def skip_map(self, writer_schema, decoder: 'FlinkAvroDecoder'):
block_count = decoder.read_var_long()
assert block_count >= 0
if block_count == 0:
return
for i in range(block_count):
decoder.skip_utf8()
self.skip_data(writer_schema.values, decoder)
        # consume the trailing block terminator, written as a var-long (see read_map above)
        decoder.read_var_long()
def read_union(self, writer_schema, reader_schema, decoder: 'FlinkAvroDecoder'):
index_of_schema = int(decoder.read_int())
if index_of_schema >= len(writer_schema.schemas):
fail_msg = "Can't access branch index %d for union with %d branches" \
% (index_of_schema, len(writer_schema.schemas))
raise SchemaResolutionException(fail_msg, writer_schema, reader_schema)
selected_writer_schema = writer_schema.schemas[index_of_schema]
return self.read_data(selected_writer_schema, reader_schema, decoder)
def skip_union(self, writer_schema, decoder):
index_of_schema = int(decoder.read_int())
if index_of_schema >= len(writer_schema.schemas):
fail_msg = "Can't access branch index %d for union with %d branches" \
% (index_of_schema, len(writer_schema.schemas))
raise SchemaResolutionException(fail_msg, writer_schema)
return self.skip_data(writer_schema.schemas[index_of_schema], decoder)
class FlinkAvroEncoder(BinaryEncoder):
def __init__(self, writer):
super().__init__(writer)
def write_int(self, datum):
self.write(STRUCT_INT.pack(datum))
def write_long(self, datum):
self.write(STRUCT_LONG_LONG.pack(datum))
def write_var_long(self, datum):
        # Inverse of FlinkAvroDecoder.read_var_long: emit 7 payload bits per byte,
        # least-significant group first, with 0x80 marking continuation bytes.
        while (datum & ~0x7F) != 0:
            self.write(((datum & 0x7F) | 0x80).to_bytes(1, 'big'))
            datum >>= 7
self.write(datum.to_bytes(1, 'big'))
def write_float(self, datum):
self.write(STRUCT_FLOAT.pack(datum))
def write_double(self, datum):
self.write(STRUCT_DOUBLE.pack(datum))
def write_bytes(self, datum):
self.write_int(len(datum))
self.write(datum)
class FlinkAvroDatumWriter(DatumWriter):
def __init__(self, writer_schema=None):
super().__init__(writer_schema=writer_schema)
def write_array(self, writer_schema, datum, encoder):
if len(datum) > 0:
encoder.write_var_long(len(datum))
for item in datum:
self.write_data(writer_schema.items, item, encoder)
        # terminate the array with a var-long 0, mirroring read_array/skip_array and write_map
        encoder.write_var_long(0)
def write_map(self, writer_schema, datum, encoder):
if len(datum) > 0:
encoder.write_var_long(len(datum))
for key, val in datum.items():
encoder.write_utf8(key)
self.write_data(writer_schema.values, val, encoder)
encoder.write_var_long(0)
def write_union(self, writer_schema, datum, encoder):
# resolve union
index_of_schema = -1
for i, candidate_schema in enumerate(writer_schema.schemas):
if Validate(candidate_schema, datum):
index_of_schema = i
if index_of_schema < 0:
raise AvroTypeException(writer_schema, datum)
# write data
encoder.write_int(index_of_schema)
self.write_data(writer_schema.schemas[index_of_schema], datum, encoder)
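# Illustrative sketch (not part of the original module): round-tripping the
# Flink-specific encodings defined above. It assumes avro's BinaryEncoder /
# BinaryDecoder accept any file-like object, so io.BytesIO is used as the
# buffer here.
if __name__ == '__main__':
    from io import BytesIO
    _buf = BytesIO()
    _enc = FlinkAvroEncoder(_buf)
    _enc.write_int(42)          # fixed 4 bytes, big-endian
    _enc.write_var_long(300)    # 7 payload bits per byte, 0x80 continuation flag
    _dec = FlinkAvroDecoder(BytesIO(_buf.getvalue()))
    assert _dec.read_int() == 42
    assert _dec.read_var_long() == 300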
| 8,000 | 33.046809 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/formats/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from .avro import FlinkAvroDecoder, FlinkAvroDatumReader
__all__ = [
"FlinkAvroDatumReader",
"FlinkAvroDecoder"
]
| 1,081 | 44.083333 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/timerservice.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import abstractmethod, ABC
from typing import Generic, TypeVar
K = TypeVar('K')
N = TypeVar('N')
class InternalTimerService(Generic[N], ABC):
"""
Interface for working with time and timers.
    This is the internal version of TimerService that allows specifying a key and a namespace to
which timers should be scoped.
"""
@abstractmethod
def current_processing_time(self):
"""
Returns the current processing time.
"""
pass
@abstractmethod
def current_watermark(self):
"""
Returns the current event-time watermark.
"""
pass
@abstractmethod
def register_processing_time_timer(self, namespace: N, t: int):
"""
Registers a timer to be fired when processing time passes the given time. The namespace you
pass here will be provided when the timer fires.
:param namespace: The namespace you pass here will be provided when the timer fires.
:param t: The processing time of the timer to be registered.
"""
pass
@abstractmethod
def register_event_time_timer(self, namespace: N, t: int):
"""
Registers a timer to be fired when event time watermark passes the given time. The namespace
you pass here will be provided when the timer fires.
:param namespace: The namespace you pass here will be provided when the timer fires.
:param t: The event time of the timer to be registered.
"""
pass
def delete_processing_time_timer(self, namespace: N, t: int):
"""
Deletes the timer for the given key and namespace.
:param namespace: The namespace you pass here will be provided when the timer fires.
:param t: The given trigger time of timer to be deleted.
"""
pass
def delete_event_time_timer(self, namespace: N, t: int):
"""
Deletes the timer for the given key and namespace.
:param namespace: The namespace you pass here will be provided when the timer fires.
:param t: The given trigger time of timer to be deleted.
"""
pass
class InternalTimer(Generic[K, N], ABC):
@abstractmethod
def get_timestamp(self) -> int:
"""
Returns the timestamp of the timer. This value determines the point in time when the timer
will fire.
"""
pass
@abstractmethod
def get_key(self) -> K:
"""
Returns the key that is bound to this timer.
"""
pass
@abstractmethod
def get_namespace(self) -> N:
"""
        Returns the namespace that is bound to this timer.
"""
pass
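# Illustrative sketch (not part of the original module): how a namespaced
# timer service is typically used by an operator -- the namespace (here a
# hypothetical window object with an `end` attribute in epoch milliseconds)
# scopes the timer, so the callback can be routed back to exactly that window.
def _register_window_cleanup_example(timer_service: InternalTimerService, window) -> None:
    timer_service.register_event_time_timer(window, window.end)
    # a processing-time variant could be registered the same way:
    # timer_service.register_processing_time_timer(window, window.end)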
| 3,681 | 31.584071 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/operations.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC
DATA_STREAM_STATELESS_FUNCTION_URN = "flink:transform:ds:stateless_function:v1"
DATA_STREAM_STATEFUL_FUNCTION_URN = "flink:transform:ds:stateful_function:v1"
class Operation(ABC):
def open(self) -> None:
pass
def close(self) -> None:
pass
class OneInputOperation(ABC):
def process_element(self, value):
raise NotImplementedError
class TwoInputOperation(ABC):
def process_element1(self, value):
raise NotImplementedError
def process_element2(self, value):
raise NotImplementedError
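# Illustrative sketch (not part of the original module): a minimal operation
# combining the interfaces above -- Operation contributes the open/close
# lifecycle, OneInputOperation the per-record hook. The upper-casing logic is
# a placeholder.
class _UpperCaseOperation(Operation, OneInputOperation):
    def open(self) -> None:
        pass  # e.g. initialize connections or metrics here
    def process_element(self, value):
        yield value.upper()  # results are yielded one by one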
| 1,530 | 33.022222 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/process/timerservice_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import collections
import time
from enum import Enum
from io import BytesIO
from pyflink.common import Row
from pyflink.datastream import TimerService
from pyflink.fn_execution.datastream.timerservice import InternalTimer, K, N, InternalTimerService
class TimerOperandType(Enum):
REGISTER_EVENT_TIMER = 0
REGISTER_PROC_TIMER = 1
DELETE_EVENT_TIMER = 2
DELETE_PROC_TIMER = 3
class LegacyInternalTimerServiceImpl(InternalTimerService[N]):
"""
Internal implementation of InternalTimerService.
TODO: Use InternalTimerServiceImpl instead.
"""
def __init__(self, keyed_state_backend):
self._keyed_state_backend = keyed_state_backend
self._current_watermark = None
self._timers = collections.OrderedDict()
def current_processing_time(self):
return int(time.time() * 1000)
def current_watermark(self):
return self._current_watermark
def advance_watermark(self, watermark: int):
self._current_watermark = watermark
def register_processing_time_timer(self, namespace: N, t: int):
current_key = self._keyed_state_backend.get_current_key()
timer = (TimerOperandType.REGISTER_PROC_TIMER, InternalTimerImpl(t, current_key, namespace))
self._timers[timer] = None
def register_event_time_timer(self, namespace: N, t: int):
current_key = self._keyed_state_backend.get_current_key()
timer = (TimerOperandType.REGISTER_EVENT_TIMER,
InternalTimerImpl(t, current_key, namespace))
self._timers[timer] = None
def delete_processing_time_timer(self, namespace: N, t: int):
current_key = self._keyed_state_backend.get_current_key()
timer = (TimerOperandType.DELETE_PROC_TIMER, InternalTimerImpl(t, current_key, namespace))
self._timers[timer] = None
def delete_event_time_timer(self, namespace: N, t: int):
current_key = self._keyed_state_backend.get_current_key()
timer = (TimerOperandType.DELETE_EVENT_TIMER, InternalTimerImpl(t, current_key, namespace))
self._timers[timer] = None
class InternalTimerServiceImpl(InternalTimerService[N]):
"""
Internal implementation of InternalTimerService.
"""
def __init__(self, keyed_state_backend):
self._keyed_state_backend = keyed_state_backend
self._current_watermark = None
self._timer_coder_impl = None
self._output_stream = None
from apache_beam.transforms.window import GlobalWindow
self._global_window = GlobalWindow()
def add_timer_info(self, timer_info):
self._timer_coder_impl = timer_info.timer_coder_impl
self._output_stream = timer_info.output_stream
def set_namespace_serializer(self, namespace_serializer):
self._namespace_serializer = namespace_serializer
def current_processing_time(self):
return int(time.time() * 1000)
def current_watermark(self):
return self._current_watermark
def advance_watermark(self, watermark: int):
self._current_watermark = watermark
def register_processing_time_timer(self, namespace: N, ts: int):
current_key = self._keyed_state_backend.get_current_key()
self._set_timer(TimerOperandType.REGISTER_PROC_TIMER, ts, current_key, namespace)
def register_event_time_timer(self, namespace: N, ts: int):
current_key = self._keyed_state_backend.get_current_key()
self._set_timer(TimerOperandType.REGISTER_EVENT_TIMER, ts, current_key, namespace)
def delete_processing_time_timer(self, namespace: N, ts: int):
current_key = self._keyed_state_backend.get_current_key()
self._set_timer(TimerOperandType.DELETE_PROC_TIMER, ts, current_key, namespace)
def delete_event_time_timer(self, namespace: N, ts: int):
current_key = self._keyed_state_backend.get_current_key()
self._set_timer(TimerOperandType.DELETE_EVENT_TIMER, ts, current_key, namespace)
def _set_timer(self, timer_operation_type, ts, key, namespace):
from apache_beam.transforms import userstate
bytes_io = BytesIO()
self._namespace_serializer.serialize(namespace, bytes_io)
encoded_namespace = bytes_io.getvalue()
timer_operand_type_value = timer_operation_type.value
timer_data = Row(timer_operand_type_value, -1, ts, key, encoded_namespace)
timer = userstate.Timer(
user_key=timer_data,
dynamic_timer_tag='',
windows=(self._global_window, ),
clear_bit=True,
fire_timestamp=None,
hold_timestamp=None,
paneinfo=None)
self._timer_coder_impl.encode_to_stream(timer, self._output_stream, True)
self._timer_coder_impl._key_coder_impl._value_coder._output_stream.maybe_flush()
class TimerServiceImpl(TimerService):
"""
Internal implementation of TimerService.
"""
def __init__(self, internal_timer_service: InternalTimerServiceImpl):
self._internal = internal_timer_service
def current_processing_time(self) -> int:
return self._internal.current_processing_time()
def current_watermark(self) -> int:
return self._internal.current_watermark()
def advance_watermark(self, wm: int):
self._internal.advance_watermark(wm)
def register_processing_time_timer(self, t: int):
self._internal.register_processing_time_timer(None, t)
def register_event_time_timer(self, t: int):
self._internal.register_event_time_timer(None, t)
def delete_processing_time_timer(self, t: int):
self._internal.delete_processing_time_timer(None, t)
def delete_event_time_timer(self, t: int):
self._internal.delete_event_time_timer(None, t)
class NonKeyedTimerServiceImpl(TimerService):
"""
Internal implementation of TimerService for ProcessFunction and CoProcessFunction.
"""
def __init__(self):
self._current_watermark = None
def current_processing_time(self) -> int:
return int(time.time() * 1000)
def current_watermark(self):
return self._current_watermark
def advance_watermark(self, wm):
self._current_watermark = wm
def register_processing_time_timer(self, t: int):
raise Exception("Register timers is only supported on a keyed stream.")
def register_event_time_timer(self, t: int):
raise Exception("Register timers is only supported on a keyed stream.")
def delete_processing_time_timer(self, t: int):
raise Exception("Deleting timers is only supported on a keyed streams.")
def delete_event_time_timer(self, t: int):
raise Exception("Deleting timers is only supported on a keyed streams.")
class InternalTimerImpl(InternalTimer[K, N]):
def __init__(self, timestamp: int, key: K, namespace: N):
self._timestamp = timestamp
self._key = key
self._namespace = namespace
def get_timestamp(self) -> int:
return self._timestamp
def get_key(self) -> K:
return self._key
def get_namespace(self) -> N:
return self._namespace
def __hash__(self):
result = int(self._timestamp ^ (self._timestamp >> 32))
result = 31 * result + hash(tuple(self._key))
result = 31 * result + hash(self._namespace)
return result
def __eq__(self, other):
return self.__class__ == other.__class__ and self._timestamp == other._timestamp \
and self._key == other._key and self._namespace == other._namespace
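# Illustrative sketch (not part of the original module): on the legacy service
# a timer registration is recorded as a (TimerOperandType, InternalTimerImpl)
# pair carrying the timestamp, the current key from the state backend, and the
# namespace. The fake state backend below is a hypothetical stand-in exposing
# only get_current_key().
def _legacy_timer_example():
    class _FakeKeyedStateBackend:
        def get_current_key(self):
            return ('user-1',)
    service = LegacyInternalTimerServiceImpl(_FakeKeyedStateBackend())
    service.register_event_time_timer(None, 1000)
    operand_type, timer = next(iter(service._timers))
    assert operand_type is TimerOperandType.REGISTER_EVENT_TIMER
    assert timer.get_timestamp() == 1000 and timer.get_key() == ('user-1',)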
| 8,511 | 35.848485 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/process/process_function.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC
from typing import cast
from pyflink.datastream import TimerService, TimeDomain
from pyflink.datastream.functions import KeyedProcessFunction, KeyedCoProcessFunction, \
ProcessFunction, CoProcessFunction, BroadcastProcessFunction, KeyedBroadcastProcessFunction, \
BaseBroadcastProcessFunction
from pyflink.datastream.state import MapStateDescriptor, BroadcastState, ReadOnlyBroadcastState, \
OperatorStateStore
from pyflink.fn_execution.internal_state import InternalBroadcastState
class InternalKeyedProcessFunctionOnTimerContext(
KeyedProcessFunction.OnTimerContext, KeyedCoProcessFunction.OnTimerContext):
"""
Internal implementation of OnTimerContext of KeyedProcessFunction and KeyedCoProcessFunction.
"""
def __init__(self, timer_service: TimerService):
self._timer_service = timer_service
self._time_domain = None
self._timestamp = None
self._current_key = None
def get_current_key(self):
return self._current_key
def set_current_key(self, current_key):
self._current_key = current_key
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._timestamp
def set_timestamp(self, ts: int):
self._timestamp = ts
def time_domain(self) -> TimeDomain:
return self._time_domain
def set_time_domain(self, td: TimeDomain):
self._time_domain = td
class InternalKeyedProcessFunctionContext(
KeyedProcessFunction.Context, KeyedCoProcessFunction.Context):
"""
Internal implementation of Context of KeyedProcessFunction and KeyedCoProcessFunction.
"""
def __init__(self, timer_service: TimerService):
self._timer_service = timer_service
self._timestamp = None
self._current_key = None
def get_current_key(self):
return self._current_key
def set_current_key(self, current_key):
self._current_key = current_key
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._timestamp
def set_timestamp(self, ts: int):
self._timestamp = ts
class InternalProcessFunctionContext(ProcessFunction.Context, CoProcessFunction.Context):
"""
Internal implementation of ProcessFunction.Context and CoProcessFunction.Context.
"""
def __init__(self, timer_service: TimerService):
self._timer_service = timer_service
self._timestamp = None
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._timestamp
def set_timestamp(self, ts: int):
self._timestamp = ts
class InternalBaseBroadcastProcessFunctionContext(BaseBroadcastProcessFunction.Context, ABC):
def __init__(self, timer_service: TimerService, operator_state_store: OperatorStateStore):
self._timer_service = timer_service
self._timestamp = None
self._operator_state_store = operator_state_store
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._timestamp
def set_timestamp(self, ts: int):
self._timestamp = ts
def current_processing_time(self) -> int:
return self._timer_service.current_processing_time()
def current_watermark(self) -> int:
return self._timer_service.current_watermark()
class InternalBroadcastProcessFunctionContext(InternalBaseBroadcastProcessFunctionContext,
BroadcastProcessFunction.Context):
def __init__(self, timer_service: TimerService, operator_state_store: OperatorStateStore):
InternalBaseBroadcastProcessFunctionContext.__init__(
self, timer_service, operator_state_store
)
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> BroadcastState:
return self._operator_state_store.get_broadcast_state(state_descriptor)
class InternalBroadcastProcessFunctionReadOnlyContext(InternalBaseBroadcastProcessFunctionContext,
BroadcastProcessFunction.ReadOnlyContext):
def __init__(self, timer_service: TimerService, operator_state_store: OperatorStateStore):
InternalBaseBroadcastProcessFunctionContext.__init__(
self, timer_service, operator_state_store
)
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> ReadOnlyBroadcastState:
return cast(
InternalBroadcastState,
self._operator_state_store.get_broadcast_state(state_descriptor)
).to_read_only_broadcast_state()
class InternalKeyedBroadcastProcessFunctionContext(InternalBaseBroadcastProcessFunctionContext,
KeyedBroadcastProcessFunction.Context):
def __init__(self, timer_service: TimerService, operator_state_store: OperatorStateStore):
InternalBaseBroadcastProcessFunctionContext.__init__(
self, timer_service, operator_state_store
)
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> BroadcastState:
return self._operator_state_store.get_broadcast_state(state_descriptor)
class InternalKeyedBroadcastProcessFunctionReadOnlyContext(
InternalBaseBroadcastProcessFunctionContext,
KeyedBroadcastProcessFunction.ReadOnlyContext):
def __init__(self, timer_service: TimerService, operator_state_store: OperatorStateStore):
InternalBaseBroadcastProcessFunctionContext.__init__(
self, timer_service, operator_state_store
)
self._current_key = None
def set_current_key(self, key):
self._current_key = key
def get_current_key(self):
return self._current_key
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> ReadOnlyBroadcastState:
return cast(
InternalBroadcastState,
self._operator_state_store.get_broadcast_state(state_descriptor)
).to_read_only_broadcast_state()
class InternalKeyedBroadcastProcessFunctionOnTimerContext(
InternalBaseBroadcastProcessFunctionContext,
KeyedBroadcastProcessFunction.OnTimerContext):
def __init__(self, timer_service: TimerService, operator_state_store: OperatorStateStore):
InternalBaseBroadcastProcessFunctionContext.__init__(
self, timer_service, operator_state_store
)
self._current_key = None
self._time_domain = None
def set_current_key(self, key):
self._current_key = key
def get_current_key(self):
return self._current_key
def set_time_domain(self, time_domain):
self._time_domain = time_domain
def time_domain(self):
return self._time_domain
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> ReadOnlyBroadcastState:
return cast(
InternalBroadcastState,
self._operator_state_store.get_broadcast_state(state_descriptor)
).to_read_only_broadcast_state()
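# Illustrative sketch (not part of the original module): how the runner wires
# one of these internal contexts before invoking user code -- a timer service
# is injected at construction and the timestamp is refreshed per record, as
# done in the sibling operations module of this package.
def _context_wiring_example():
    from pyflink.fn_execution.datastream.process.timerservice_impl import (
        NonKeyedTimerServiceImpl)
    ctx = InternalProcessFunctionContext(NonKeyedTimerServiceImpl())
    ctx.set_timestamp(1234)
    assert ctx.timestamp() == 1234
    return ctx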
| 8,155 | 35.573991 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/process/runtime_context.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Dict
from pyflink.datastream import RuntimeContext
from pyflink.datastream.state import ValueStateDescriptor, ValueState, ListStateDescriptor, \
ListState, MapStateDescriptor, MapState, ReducingStateDescriptor, ReducingState, \
AggregatingStateDescriptor, AggregatingState
from pyflink.fn_execution.coders import from_type_info, MapCoder, GenericArrayCoder
from pyflink.metrics import MetricGroup
class StreamingRuntimeContext(RuntimeContext):
def __init__(self,
task_name: str,
task_name_with_subtasks: str,
number_of_parallel_subtasks: int,
max_number_of_parallel_subtasks: int,
index_of_this_subtask: int,
attempt_number: int,
job_parameters: Dict[str, str],
metric_group: MetricGroup,
keyed_state_backend,
in_batch_execution_mode: bool):
self._task_name = task_name
self._task_name_with_subtasks = task_name_with_subtasks
self._number_of_parallel_subtasks = number_of_parallel_subtasks
self._max_number_of_parallel_subtasks = max_number_of_parallel_subtasks
self._index_of_this_subtask = index_of_this_subtask
self._attempt_number = attempt_number
self._job_parameters = job_parameters
self._metric_group = metric_group
self._keyed_state_backend = keyed_state_backend
self._in_batch_execution_mode = in_batch_execution_mode
def get_task_name(self) -> str:
"""
Returns the name of the task in which the UDF runs, as assigned during plan construction.
"""
return self._task_name
def get_number_of_parallel_subtasks(self) -> int:
"""
Gets the parallelism with which the parallel task runs.
"""
return self._number_of_parallel_subtasks
def get_max_number_of_parallel_subtasks(self) -> int:
"""
        Gets the max parallelism with which the parallel task runs.
"""
return self._max_number_of_parallel_subtasks
def get_index_of_this_subtask(self) -> int:
"""
Gets the number of this parallel subtask. The numbering starts from 0 and goes up to
parallelism-1 (parallelism as returned by
:func:`~RuntimeContext.get_number_of_parallel_subtasks`).
"""
return self._index_of_this_subtask
def get_attempt_number(self) -> int:
"""
Gets the attempt number of this parallel subtask. First attempt is numbered 0.
"""
return self._attempt_number
def get_task_name_with_subtasks(self) -> str:
"""
Returns the name of the task, appended with the subtask indicator, such as "MyTask (3/6)",
where 3 would be (:func:`~RuntimeContext.get_index_of_this_subtask` + 1), and 6 would be
:func:`~RuntimeContext.get_number_of_parallel_subtasks`.
"""
return self._task_name_with_subtasks
def get_job_parameter(self, key: str, default_value: str):
"""
Gets the global job parameter value associated with the given key as a string.
"""
return self._job_parameters[key] if key in self._job_parameters else default_value
def get_metrics_group(self) -> MetricGroup:
"""
Gets the metric group.
"""
return self._metric_group
def get_state(self, state_descriptor: ValueStateDescriptor) -> ValueState:
if self._keyed_state_backend:
return self._keyed_state_backend.get_value_state(
state_descriptor.name,
from_type_info(state_descriptor.type_info),
state_descriptor._ttl_config)
else:
raise Exception("This state is only accessible by functions executed on a KeyedStream.")
def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListState:
if self._keyed_state_backend:
array_coder = from_type_info(state_descriptor.type_info) # type: GenericArrayCoder
return self._keyed_state_backend.get_list_state(
state_descriptor.name,
array_coder._elem_coder,
state_descriptor._ttl_config)
else:
raise Exception("This state is only accessible by functions executed on a KeyedStream.")
def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapState:
if self._keyed_state_backend:
map_coder = from_type_info(state_descriptor.type_info) # type: MapCoder
key_coder = map_coder._key_coder
value_coder = map_coder._value_coder
return self._keyed_state_backend.get_map_state(
state_descriptor.name,
key_coder,
value_coder,
state_descriptor._ttl_config)
else:
raise Exception("This state is only accessible by functions executed on a KeyedStream.")
def get_reducing_state(self, state_descriptor: ReducingStateDescriptor) -> ReducingState:
if self._keyed_state_backend:
return self._keyed_state_backend.get_reducing_state(
state_descriptor.get_name(),
from_type_info(state_descriptor.type_info),
state_descriptor.get_reduce_function(),
state_descriptor._ttl_config)
else:
raise Exception("This state is only accessible by functions executed on a KeyedStream.")
def get_aggregating_state(
self, state_descriptor: AggregatingStateDescriptor) -> AggregatingState:
if self._keyed_state_backend:
return self._keyed_state_backend.get_aggregating_state(
state_descriptor.get_name(),
from_type_info(state_descriptor.type_info),
state_descriptor.get_agg_function(),
state_descriptor._ttl_config)
else:
raise Exception("This state is only accessible by functions executed on a KeyedStream.")
@staticmethod
def of(runtime_context_proto, metric_group, keyed_state_backend=None):
return StreamingRuntimeContext(
runtime_context_proto.task_name,
runtime_context_proto.task_name_with_subtasks,
runtime_context_proto.number_of_parallel_subtasks,
runtime_context_proto.max_number_of_parallel_subtasks,
runtime_context_proto.index_of_this_subtask,
runtime_context_proto.attempt_number,
{p.key: p.value for p in runtime_context_proto.job_parameters},
metric_group,
keyed_state_backend,
runtime_context_proto.in_batch_execution_mode)
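# Illustrative sketch (not part of the original module): the state accessors
# above are what user-defined keyed functions reach through the runtime
# context handed to their open() hook. The descriptor name 'count' and the
# LONG type are arbitrary placeholders.
def _open_example(runtime_context: RuntimeContext):
    from pyflink.common.typeinfo import Types
    return runtime_context.get_state(ValueStateDescriptor('count', Types.LONG()))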
| 7,669 | 44.117647 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/process/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/process/input_handler.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC
from enum import Enum
from typing import cast, Iterable
from pyflink.common import Row
from pyflink.common.constants import DEFAULT_OUTPUT_TAG
from pyflink.datastream.output_tag import OutputTag
from pyflink.fn_execution.datastream.process.timerservice_impl import InternalTimerServiceImpl
class TimerType(Enum):
EVENT_TIME = 0
PROCESSING_TIME = 1
class RunnerInputHandler(ABC):
"""
Handler which handles normal input data.
"""
def __init__(
self,
internal_timer_service: InternalTimerServiceImpl,
process_element_func,
has_side_output: bool
):
self._internal_timer_service = internal_timer_service
self._process_element_func = process_element_func
self._has_side_output = has_side_output
def process_element(self, value) -> Iterable:
timestamp = value[0]
watermark = value[1]
data = value[2]
self._advance_watermark(watermark)
yield from _emit_results(timestamp,
watermark,
self._process_element_func(data, timestamp),
self._has_side_output)
def _advance_watermark(self, watermark: int) -> None:
self._internal_timer_service.advance_watermark(watermark)
class TimerHandler(ABC):
"""
    Handler which handles timer events.
"""
def __init__(
self,
internal_timer_service: InternalTimerServiceImpl,
on_event_time_func,
on_processing_time_func,
namespace_coder,
has_side_output
):
self._internal_timer_service = internal_timer_service
self._on_event_time_func = on_event_time_func
self._on_processing_time_func = on_processing_time_func
self._namespace_coder = namespace_coder
self._has_side_output = has_side_output
def process_timer(self, timer_data) -> Iterable:
timer_type = timer_data[0]
watermark = timer_data[1]
timestamp = timer_data[2]
key = timer_data[3]
serialized_namespace = timer_data[4]
self._advance_watermark(watermark)
if self._namespace_coder is not None:
namespace = self._namespace_coder.decode(serialized_namespace)
else:
namespace = None
if timer_type == TimerType.EVENT_TIME.value:
yield from _emit_results(
timestamp,
watermark,
self._on_event_time(timestamp, key, namespace),
self._has_side_output
)
elif timer_type == TimerType.PROCESSING_TIME.value:
yield from _emit_results(
timestamp,
watermark,
self._on_processing_time(timestamp, key, namespace),
self._has_side_output
)
else:
raise Exception("Unsupported timer type: %d" % timer_type)
def _on_event_time(self, timestamp, key, namespace) -> Iterable:
yield from self._on_event_time_func(timestamp, key, namespace)
def _on_processing_time(self, timestamp, key, namespace) -> Iterable:
yield from self._on_processing_time_func(timestamp, key, namespace)
def _advance_watermark(self, watermark: int) -> None:
self._internal_timer_service.advance_watermark(watermark)
def _emit_results(timestamp, watermark, results, has_side_output):
if results:
if has_side_output:
for result in results:
if isinstance(result, tuple) and isinstance(result[0], OutputTag):
yield cast(OutputTag, result[0]).tag_id, Row(
timestamp, watermark, result[1]
)
else:
yield DEFAULT_OUTPUT_TAG, Row(timestamp, watermark, result)
else:
for result in results:
yield Row(timestamp, watermark, result)
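# Illustrative sketch (not part of the original module): the two output shapes
# produced by _emit_results. Without side outputs it yields bare
# Row(timestamp, watermark, result) values; with side outputs enabled, plain
# results are routed to DEFAULT_OUTPUT_TAG and (OutputTag, value) tuples are
# routed to their tag id. Assumes Row supports value-based equality.
def _emit_results_example():
    plain = list(_emit_results(7, 5, ['a', 'b'], False))
    assert plain == [Row(7, 5, 'a'), Row(7, 5, 'b')]
    tagged = list(_emit_results(7, 5, ['a'], True))
    assert tagged == [(DEFAULT_OUTPUT_TAG, Row(7, 5, 'a'))]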
| 4,908 | 35.909774 | 94 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/process/operations.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
from typing import cast
from pyflink.common import Row
from pyflink.common.serializer import VoidNamespaceSerializer
from pyflink.datastream import TimeDomain, RuntimeContext
from pyflink.datastream.functions import BroadcastProcessFunction
from pyflink.datastream.window import WindowOperationDescriptor
from pyflink.fn_execution import pickle
from pyflink.fn_execution.datastream.process.input_handler import (
RunnerInputHandler,
TimerHandler,
_emit_results,
)
from pyflink.fn_execution.datastream import operations
from pyflink.fn_execution.datastream.process.runtime_context import StreamingRuntimeContext
from pyflink.fn_execution.datastream.process.process_function import (
InternalKeyedProcessFunctionOnTimerContext,
InternalKeyedProcessFunctionContext,
InternalProcessFunctionContext,
InternalBroadcastProcessFunctionContext,
InternalBroadcastProcessFunctionReadOnlyContext,
InternalKeyedBroadcastProcessFunctionContext,
InternalKeyedBroadcastProcessFunctionReadOnlyContext,
InternalKeyedBroadcastProcessFunctionOnTimerContext,
)
from pyflink.fn_execution.datastream.process.timerservice_impl import (
TimerServiceImpl,
InternalTimerServiceImpl,
NonKeyedTimerServiceImpl,
)
from pyflink.fn_execution.datastream.window.window_operator import WindowOperator
from pyflink.fn_execution.metrics.process.metric_impl import GenericMetricGroup
class Operation(operations.OneInputOperation, abc.ABC):
def __init__(self, serialized_fn, operator_state_backend=None):
if serialized_fn.metric_enabled:
self.base_metric_group = GenericMetricGroup(None, None)
else:
self.base_metric_group = None
self.operator_state_backend = operator_state_backend
def finish(self):
self._update_gauge(self.base_metric_group)
if self.operator_state_backend is not None:
self.operator_state_backend.commit()
def _update_gauge(self, base_metric_group):
if base_metric_group is not None:
for name in base_metric_group._flink_gauge:
flink_gauge = base_metric_group._flink_gauge[name]
beam_gauge = base_metric_group._beam_gauge[name]
beam_gauge.set(flink_gauge())
for sub_group in base_metric_group._sub_groups:
self._update_gauge(sub_group)
class StatelessOperation(Operation):
def __init__(self, serialized_fn, operator_state_backend):
super(StatelessOperation, self).__init__(serialized_fn, operator_state_backend)
(
self.open_func,
self.close_func,
self.process_element_func,
) = extract_stateless_function(
user_defined_function_proto=serialized_fn,
runtime_context=StreamingRuntimeContext.of(
serialized_fn.runtime_context, self.base_metric_group
),
operator_state_store=operator_state_backend,
)
def open(self):
self.open_func()
def close(self):
self.close_func()
def process_element(self, value):
return self.process_element_func(value)
class StatefulOperation(Operation):
def __init__(self, serialized_fn, keyed_state_backend, operator_state_backend):
super(StatefulOperation, self).__init__(serialized_fn, operator_state_backend)
self.keyed_state_backend = keyed_state_backend
(
self.open_func,
self.close_func,
self.process_element_func,
self.process_timer_func,
self.internal_timer_service,
) = extract_stateful_function(
user_defined_function_proto=serialized_fn,
runtime_context=StreamingRuntimeContext.of(
serialized_fn.runtime_context,
self.base_metric_group,
self.keyed_state_backend,
),
keyed_state_backend=self.keyed_state_backend,
operator_state_store=self.operator_state_backend,
)
def finish(self):
super().finish()
self.keyed_state_backend.commit()
def open(self):
self.open_func()
def close(self):
self.close_func()
def process_element(self, value):
return self.process_element_func(value)
def process_timer(self, timer_data):
return self.process_timer_func(timer_data)
def add_timer_info(self, timer_info):
self.internal_timer_service.add_timer_info(timer_info)
def extract_stateless_function(
user_defined_function_proto, runtime_context: RuntimeContext, operator_state_store
):
"""
Extracts user-defined-function from the proto representation of a
:class:`Function`.
:param user_defined_function_proto: the proto representation of the Python :class:`Function`
:param runtime_context: the streaming runtime context
:param operator_state_store: operator state store for getting broadcast states
"""
from pyflink.fn_execution import flink_fn_execution_pb2
func_type = user_defined_function_proto.function_type
has_side_output = user_defined_function_proto.has_side_output
UserDefinedDataStreamFunction = flink_fn_execution_pb2.UserDefinedDataStreamFunction
if func_type == UserDefinedDataStreamFunction.REVISE_OUTPUT:
def open_func():
pass
def close_func():
pass
def revise_output(value):
# VALUE[CURRENT_TIMESTAMP, CURRENT_WATERMARK, NORMAL_DATA]
timestamp = value[0]
element = value[2]
yield Row(timestamp, element)
process_element_func = revise_output
else:
user_defined_func = pickle.loads(user_defined_function_proto.payload)
def open_func():
if hasattr(user_defined_func, "open"):
user_defined_func.open(runtime_context)
def close_func():
if hasattr(user_defined_func, "close"):
user_defined_func.close()
if func_type == UserDefinedDataStreamFunction.PROCESS:
process_element = user_defined_func.process_element
ctx = InternalProcessFunctionContext(NonKeyedTimerServiceImpl())
def wrapped_func(value):
# VALUE[CURRENT_TIMESTAMP, CURRENT_WATERMARK, NORMAL_DATA]
timestamp = value[0]
watermark = value[1]
ctx.set_timestamp(timestamp)
ctx.timer_service().advance_watermark(watermark)
results = process_element(value[2], ctx)
yield from _emit_results(timestamp, watermark, results, has_side_output)
process_element_func = wrapped_func
elif func_type == UserDefinedDataStreamFunction.CO_PROCESS:
process_element1 = user_defined_func.process_element1
process_element2 = user_defined_func.process_element2
ctx = InternalProcessFunctionContext(NonKeyedTimerServiceImpl())
def wrapped_func(value):
# VALUE[CURRENT_TIMESTAMP, CURRENT_WATERMARK, [isLeft, leftInput, rightInput]]
timestamp = value[0]
watermark = value[1]
ctx.set_timestamp(timestamp)
ctx.timer_service().advance_watermark(watermark)
normal_data = value[2]
if normal_data[0]:
results = process_element1(normal_data[1], ctx)
else:
results = process_element2(normal_data[2], ctx)
yield from _emit_results(timestamp, watermark, results, has_side_output)
process_element_func = wrapped_func
elif func_type == UserDefinedDataStreamFunction.CO_BROADCAST_PROCESS:
user_defined_func = cast(BroadcastProcessFunction, user_defined_func)
process_element = user_defined_func.process_element
process_broadcast_element = user_defined_func.process_broadcast_element
broadcast_ctx = InternalBroadcastProcessFunctionContext(
NonKeyedTimerServiceImpl(), operator_state_store
)
read_only_broadcast_ctx = InternalBroadcastProcessFunctionReadOnlyContext(
NonKeyedTimerServiceImpl(), operator_state_store
)
def wrapped_func(value):
# VALUE[CURRENT_TIMESTAMP, CURRENT_WATERMARK,
# [isNormal, normalInput, broadcastInput]]
timestamp = value[0]
watermark = value[1]
broadcast_ctx.set_timestamp(timestamp)
cast(
TimerServiceImpl, broadcast_ctx.timer_service()
).advance_watermark(watermark)
read_only_broadcast_ctx.set_timestamp(timestamp)
cast(
TimerServiceImpl, read_only_broadcast_ctx.timer_service()
).advance_watermark(watermark)
data = value[2]
if data[0]:
results = process_element(data[1], read_only_broadcast_ctx)
else:
results = process_broadcast_element(data[2], broadcast_ctx)
yield from _emit_results(timestamp, watermark, results, has_side_output)
process_element_func = wrapped_func
else:
raise Exception("Unsupported function_type: " + str(func_type))
return open_func, close_func, process_element_func
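# Illustrative sketch: a minimal, hypothetical driver for the three callables returned
# above. `proto`, `runtime_context`, `operator_state_store` and `record` are placeholder
# arguments supplied by the caller; every element handed to process_element_func follows
# the [CURRENT_TIMESTAMP, CURRENT_WATERMARK, NORMAL_DATA] layout documented in the
# wrappers above.
def _example_drive_stateless_function(proto, runtime_context, operator_state_store, record):
    open_func, close_func, process_element_func = extract_stateless_function(
        proto, runtime_context, operator_state_store)
    open_func()
    # the wrapped function is a generator, so drain it to collect the emitted results
    results = list(process_element_func([1000, 999, record]))
    close_func()
    return results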
def extract_stateful_function(
user_defined_function_proto, runtime_context: RuntimeContext, keyed_state_backend,
operator_state_store
):
from pyflink.fn_execution import flink_fn_execution_pb2
func_type = user_defined_function_proto.function_type
user_defined_func = pickle.loads(user_defined_function_proto.payload)
has_side_output = user_defined_function_proto.has_side_output
internal_timer_service = InternalTimerServiceImpl(keyed_state_backend)
def state_key_selector(normal_data):
return Row(normal_data[0])
def user_key_selector(normal_data):
return normal_data[0]
def input_selector(normal_data):
return normal_data[1]
UserDefinedDataStreamFunction = flink_fn_execution_pb2.UserDefinedDataStreamFunction
if func_type in (
UserDefinedDataStreamFunction.KEYED_PROCESS,
UserDefinedDataStreamFunction.KEYED_CO_PROCESS,
):
timer_service = TimerServiceImpl(internal_timer_service)
ctx = InternalKeyedProcessFunctionContext(timer_service)
on_timer_ctx = InternalKeyedProcessFunctionOnTimerContext(timer_service)
process_function = user_defined_func
internal_timer_service.set_namespace_serializer(VoidNamespaceSerializer())
def open_func():
if hasattr(process_function, "open"):
process_function.open(runtime_context)
def close_func():
if hasattr(process_function, "close"):
process_function.close()
def on_event_time(timestamp: int, key, namespace):
keyed_state_backend.set_current_key(key)
return _on_timer(TimeDomain.EVENT_TIME, timestamp, key)
def on_processing_time(timestamp: int, key, namespace):
keyed_state_backend.set_current_key(key)
return _on_timer(TimeDomain.PROCESSING_TIME, timestamp, key)
def _on_timer(time_domain: TimeDomain, timestamp: int, key):
user_current_key = user_key_selector(key)
on_timer_ctx.set_timestamp(timestamp)
on_timer_ctx.set_current_key(user_current_key)
on_timer_ctx.set_time_domain(time_domain)
return process_function.on_timer(timestamp, on_timer_ctx)
if func_type == UserDefinedDataStreamFunction.KEYED_PROCESS:
def process_element(normal_data, timestamp: int):
ctx.set_timestamp(timestamp)
ctx.set_current_key(user_key_selector(normal_data))
keyed_state_backend.set_current_key(state_key_selector(normal_data))
return process_function.process_element(
input_selector(normal_data), ctx
)
elif func_type == UserDefinedDataStreamFunction.KEYED_CO_PROCESS:
def process_element(normal_data, timestamp: int):
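                # normal_data is laid out as [isLeft, leftInput, rightInput]
                # (cf. the CO_PROCESS path in extract_stateless_function above)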
is_left = normal_data[0]
if is_left:
user_input = normal_data[1]
else:
user_input = normal_data[2]
ctx.set_timestamp(timestamp)
current_user_key = user_key_selector(user_input)
ctx.set_current_key(current_user_key)
on_timer_ctx.set_current_key(current_user_key)
keyed_state_backend.set_current_key(state_key_selector(user_input))
if is_left:
return process_function.process_element1(
input_selector(user_input), ctx
)
else:
return process_function.process_element2(
input_selector(user_input), ctx
)
else:
raise Exception("Unsupported func_type: " + str(func_type))
elif func_type == UserDefinedDataStreamFunction.KEYED_CO_BROADCAST_PROCESS:
timer_service = TimerServiceImpl(internal_timer_service)
ctx = InternalKeyedBroadcastProcessFunctionContext(timer_service, operator_state_store)
read_only_ctx = InternalKeyedBroadcastProcessFunctionReadOnlyContext(timer_service,
operator_state_store)
on_timer_ctx = InternalKeyedBroadcastProcessFunctionOnTimerContext(timer_service,
operator_state_store)
internal_timer_service.set_namespace_serializer(VoidNamespaceSerializer())
def open_func():
if hasattr(user_defined_func, "open"):
user_defined_func.open(runtime_context)
def close_func():
if hasattr(user_defined_func, "close"):
user_defined_func.close()
def on_event_time(timestamp: int, key, namespace):
keyed_state_backend.set_current_key(key)
return _on_timer(TimeDomain.EVENT_TIME, timestamp, key)
def on_processing_time(timestamp: int, key, namespace):
keyed_state_backend.set_current_key(key)
return _on_timer(TimeDomain.PROCESSING_TIME, timestamp, key)
def _on_timer(time_domain: TimeDomain, timestamp: int, key):
user_current_key = user_key_selector(key)
on_timer_ctx.set_timestamp(timestamp)
on_timer_ctx.set_current_key(user_current_key)
on_timer_ctx.set_time_domain(time_domain)
return user_defined_func.on_timer(timestamp, on_timer_ctx)
def process_element(normal_data, timestamp):
ctx.set_timestamp(timestamp)
read_only_ctx.set_timestamp(timestamp)
if normal_data[0]:
data = normal_data[1]
read_only_ctx.set_current_key(user_key_selector(data))
keyed_state_backend.set_current_key(state_key_selector(data))
return user_defined_func.process_element(input_selector(data), read_only_ctx)
else:
return user_defined_func.process_broadcast_element(normal_data[2], ctx)
elif func_type == UserDefinedDataStreamFunction.WINDOW:
window_operation_descriptor = (
user_defined_func
) # type: WindowOperationDescriptor
window_assigner = window_operation_descriptor.assigner
window_trigger = window_operation_descriptor.trigger
allowed_lateness = window_operation_descriptor.allowed_lateness
late_data_output_tag = window_operation_descriptor.late_data_output_tag
window_state_descriptor = window_operation_descriptor.window_state_descriptor
internal_window_function = window_operation_descriptor.internal_window_function
window_serializer = window_operation_descriptor.window_serializer
window_coder = window_serializer._get_coder()
keyed_state_backend.namespace_coder = window_coder
keyed_state_backend._namespace_coder_impl = window_coder.get_impl()
window_operator = WindowOperator(
window_assigner,
keyed_state_backend,
user_key_selector,
window_state_descriptor,
internal_window_function,
window_trigger,
allowed_lateness,
late_data_output_tag,
)
internal_timer_service.set_namespace_serializer(window_serializer)
def open_func():
window_operator.open(runtime_context, internal_timer_service)
def close_func():
window_operator.close()
def process_element(normal_data, timestamp: int):
keyed_state_backend.set_current_key(state_key_selector(normal_data))
return window_operator.process_element(
input_selector(normal_data), timestamp
)
def on_event_time(timestamp: int, key, namespace):
keyed_state_backend.set_current_key(key)
return window_operator.on_event_time(timestamp, key, namespace)
def on_processing_time(timestamp: int, key, namespace):
keyed_state_backend.set_current_key(key)
return window_operator.on_processing_time(timestamp, key, namespace)
else:
raise Exception("Unsupported function_type: " + str(func_type))
input_handler = RunnerInputHandler(internal_timer_service, process_element, has_side_output)
process_element_func = input_handler.process_element
timer_handler = TimerHandler(
internal_timer_service,
on_event_time,
on_processing_time,
keyed_state_backend._namespace_coder_impl,
has_side_output
)
process_timer_func = timer_handler.process_timer
return (
open_func,
close_func,
process_element_func,
process_timer_func,
internal_timer_service)
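# A hypothetical runner unpacks the tuple returned above as
#     open_func, close_func, process_element_func, process_timer_func, timer_service
# and routes data records to process_element_func and fired timers (as encoded by the
# Java operator) to process_timer_func, calling open_func() before the first element
# and close_func() on shutdown.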
| 19,162 | 39.77234 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/window/merging_window_set.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Collection, Iterable
from pyflink.datastream import MergingWindowAssigner
from pyflink.datastream.state import MapState
W = TypeVar("W")
class MergeResultsCallback(MergingWindowAssigner.MergeCallback):
def __init__(self, merge_results: dict):
self._merge_results = merge_results
def merge(self, to_be_merged: Iterable[W], merge_result: W) -> None:
self._merge_results[merge_result] = to_be_merged
class MergingWindowSet(Generic[W]):
class MergeFunction(ABC, Generic[W]):
@abstractmethod
def merge(self,
merge_result: W,
merged_windows: Collection[W],
state_window_result: W,
merged_state_windows: Collection[W]):
pass
def __init__(self, assigner: MergingWindowAssigner, state: MapState[W, W]):
self._window_assigner = assigner
self._mapping = dict()
for window_for_user, window_in_state in state.items():
self._mapping[window_for_user] = window_in_state
self._state = state
self._initial_mapping = dict(self._mapping)
def persist(self) -> None:
if self._mapping != self._initial_mapping:
self._state.clear()
for window_for_user, window_in_state in self._mapping.items():
self._state.put(window_for_user, window_in_state)
def get_state_window(self, window: W) -> W:
if window in self._mapping:
return self._mapping[window]
else:
return None
def retire_window(self, window) -> None:
if window in self._mapping:
self._mapping.pop(window)
else:
raise Exception("Window %s is not in in-flight window set." % window)
def add_window(self, new_window: W, merge_function: MergeFunction[W]):
windows = []
windows.extend(self._mapping.keys())
windows.append(new_window)
merge_results = dict()
self._window_assigner.merge_windows(windows, MergeResultsCallback(merge_results))
result_window = new_window
merged_new_window = False
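        # For every merge reported by the assigner: keep the state window of one of the
        # pre-existing windows as the state window of the merge result, drop the other
        # windows from the mapping, and pass their state windows to merge_function so
        # the backing state can be merged. The freshly added window is removed from
        # merged_windows first because it has no state of its own yet.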
for merge_result, merged_windows in merge_results.items():
if new_window in merged_windows:
merged_new_window = True
merged_windows.remove(new_window)
result_window = merge_result
merged_state_window = self._mapping[next(iter(merged_windows))]
merged_state_windows = []
for merged_window in merged_windows:
if merged_window in self._mapping:
res = self._mapping.pop(merged_window)
merged_state_windows.append(res)
self._mapping[merge_result] = merged_state_window
merged_state_windows.remove(merged_state_window)
if merge_result not in merged_windows or len(merged_windows) != 1:
merge_function.merge(
merge_result,
merged_windows,
self._mapping[merge_result],
merged_state_windows)
if len(merge_results) == 0 or (result_window == new_window and not merged_new_window):
self._mapping[result_window] = result_window
return result_window
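# Illustrative sketch: exercises add_window with duck-typed stand-ins. Windows are
# modelled as (start, end) tuples, the toy assigner merges overlapping tuples into
# their covering span, and a plain dict doubles as the MapState backend. All helper
# classes below are hypothetical test doubles, not Flink APIs.
def _example_merge_overlapping_windows():
    class _FakeMapState(dict):
        # MergingWindowSet only needs items(), clear() and put()
        def put(self, key, value):
            self[key] = value
    class _OverlapAssigner(object):
        # duck-typed stand-in: only merge_windows() is called by MergingWindowSet
        def merge_windows(self, windows, callback):
            windows = sorted(windows)
            current = windows[0]
            group = [current]
            for window in windows[1:]:
                if window[0] <= current[1]:
                    group.append(window)
                    current = (current[0], max(current[1], window[1]))
                else:
                    if len(group) > 1:
                        callback.merge(list(group), current)
                    current = window
                    group = [window]
            if len(group) > 1:
                callback.merge(list(group), current)
    class _RecordingMergeFunction(MergingWindowSet.MergeFunction):
        def __init__(self):
            self.calls = []
        def merge(self, merge_result, merged_windows, state_window_result,
                  merged_state_windows):
            self.calls.append((merge_result, list(merged_windows), state_window_result,
                               list(merged_state_windows)))
    state = _FakeMapState()
    window_set = MergingWindowSet(_OverlapAssigner(), state)
    merge_function = _RecordingMergeFunction()
    window_set.add_window((0, 10), merge_function)  # first window, nothing merges
    merged = window_set.add_window((5, 20), merge_function)  # overlaps -> (0, 20)
    window_set.persist()
    return merged, merge_function.calls, dict(state)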
| 4,315 | 36.206897 | 94 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/window/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/window/window_operator.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import typing
from typing import TypeVar, Iterable, Collection, Optional
from pyflink.common.constants import MAX_LONG_VALUE
from pyflink.common.typeinfo import PickledBytesTypeInfo
from pyflink.datastream import WindowAssigner, Trigger, MergingWindowAssigner, TriggerResult
from pyflink.datastream.functions import KeyedStateStore, RuntimeContext, InternalWindowFunction
from pyflink.datastream.output_tag import OutputTag
from pyflink.datastream.state import StateDescriptor, ListStateDescriptor, \
ReducingStateDescriptor, AggregatingStateDescriptor, ValueStateDescriptor, MapStateDescriptor, \
State, AggregatingState, ReducingState, MapState, ListState, ValueState, AppendingState
from pyflink.fn_execution.datastream.timerservice import InternalTimerService
from pyflink.fn_execution.datastream.window.merging_window_set import MergingWindowSet
from pyflink.fn_execution.internal_state import InternalMergingState, InternalKvState, \
InternalAppendingState
from pyflink.metrics import MetricGroup
T = TypeVar("T")
IN = TypeVar("IN")
OUT = TypeVar("OUT")
KEY = TypeVar("KEY")
W = TypeVar("W")
def get_or_create_keyed_state(runtime_context, state_descriptor):
if isinstance(state_descriptor, ListStateDescriptor):
state = runtime_context.get_list_state(state_descriptor)
elif isinstance(state_descriptor, ReducingStateDescriptor):
state = runtime_context.get_reducing_state(state_descriptor)
elif isinstance(state_descriptor, AggregatingStateDescriptor):
state = runtime_context.get_aggregating_state(state_descriptor)
elif isinstance(state_descriptor, ValueStateDescriptor):
state = runtime_context.get_state(state_descriptor)
elif isinstance(state_descriptor, MapStateDescriptor):
state = runtime_context.get_map_state(state_descriptor)
else:
raise Exception("Unsupported state descriptor: %s" % type(state_descriptor))
return state
class MergingWindowStateStore(KeyedStateStore):
def __init__(self):
self.window = None
def get_state(self, state_descriptor: ValueStateDescriptor) -> ValueState:
raise Exception("Per-window state is not allowed when using merging windows.")
def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListState:
raise Exception("Per-window state is not allowed when using merging windows.")
def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapState:
raise Exception("Per-window state is not allowed when using merging windows.")
def get_reducing_state(self, state_descriptor: ReducingStateDescriptor) -> ReducingState:
raise Exception("Per-window state is not allowed when using merging windows.")
def get_aggregating_state(
self, state_descriptor: AggregatingStateDescriptor) -> AggregatingState:
raise Exception("Per-window state is not allowed when using merging windows.")
class PerWindowStateStore(KeyedStateStore):
def __init__(self, runtime_context):
self._runtime_context = runtime_context
self.window = None
def get_state(self, state_descriptor: ValueStateDescriptor) -> ValueState:
return self._set_namespace(self._runtime_context.get_state(state_descriptor))
def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListState:
return self._set_namespace(self._runtime_context.get_list_state(state_descriptor))
def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapState:
return self._set_namespace(self._runtime_context.get_map_state(state_descriptor))
def get_reducing_state(self, state_descriptor: ReducingStateDescriptor) -> ReducingState:
return self._set_namespace(self._runtime_context.get_reducing_state(state_descriptor))
def get_aggregating_state(
self, state_descriptor: AggregatingStateDescriptor) -> AggregatingState:
return self._set_namespace(self._runtime_context.get_aggregating_state(state_descriptor))
def _set_namespace(self, state):
state.set_current_namespace(self.window)
return state
class Context(Trigger.OnMergeContext):
def __init__(
self,
runtime_context: RuntimeContext,
internal_timer_service: InternalTimerService,
trigger: Trigger):
self._runtime_context = runtime_context
self._internal_timer_service = internal_timer_service
self._trigger = trigger
self.user_key = None
self.window = None
self.merged_windows = None
def get_current_processing_time(self) -> int:
return self._internal_timer_service.current_processing_time()
def get_metric_group(self) -> MetricGroup:
return self._runtime_context.get_metrics_group()
def get_current_watermark(self) -> int:
return self._internal_timer_service.current_watermark()
def register_processing_time_timer(self, time: int) -> None:
self._internal_timer_service.register_processing_time_timer(self.window, time)
def register_event_time_timer(self, time: int) -> None:
self._internal_timer_service.register_event_time_timer(self.window, time)
def delete_processing_time_timer(self, time: int) -> None:
self._internal_timer_service.delete_processing_time_timer(self.window, time)
def delete_event_time_timer(self, time: int) -> None:
self._internal_timer_service.delete_event_time_timer(self.window, time)
def merge_partitioned_state(self, state_descriptor: StateDescriptor) -> None:
if self.merged_windows is not None and len(self.merged_windows) > 0:
raw_state = get_or_create_keyed_state(self._runtime_context, state_descriptor)
if isinstance(raw_state, InternalMergingState):
raw_state.merge_namespaces(self.window, self.merged_windows)
else:
raise Exception(
"The given state descriptor does not refer to a mergeable state (MergingState)")
def get_partitioned_state(self, state_descriptor: StateDescriptor) -> State:
state = get_or_create_keyed_state(
self._runtime_context, state_descriptor) # type: InternalKvState
state.set_current_namespace(self.window)
return state
def on_element(self, value, timestamp) -> TriggerResult:
return self._trigger.on_element(value, timestamp, self.window, self)
def on_processing_time(self, time) -> TriggerResult:
return self._trigger.on_processing_time(time, self.window, self)
def on_event_time(self, time) -> TriggerResult:
return self._trigger.on_event_time(time, self.window, self)
def on_merge(self, merged_windows) -> None:
self.merged_windows = merged_windows
self._trigger.on_merge(self.window, self)
def clear(self) -> None:
self._trigger.clear(self.window, self)
class WindowContext(InternalWindowFunction.InternalWindowContext):
def __init__(self,
window_assigner: WindowAssigner,
runtime_context: RuntimeContext,
window_function: InternalWindowFunction,
internal_timer_service: InternalTimerService):
self.window = None
if isinstance(window_assigner, MergingWindowAssigner):
self._window_state = MergingWindowStateStore()
else:
self._window_state = PerWindowStateStore(runtime_context)
self._runtime_context = runtime_context
self._user_function = window_function
self._internal_timer_service = internal_timer_service
def current_processing_time(self) -> int:
return self._internal_timer_service.current_processing_time()
def current_watermark(self) -> int:
return self._internal_timer_service.current_watermark()
def window_state(self) -> KeyedStateStore:
self._window_state.window = self.window
return self._window_state
def global_state(self) -> KeyedStateStore:
return self._runtime_context
def clear(self) -> None:
self._user_function.clear(self.window, self)
class WindowAssignerContext(WindowAssigner.WindowAssignerContext):
def __init__(self,
internal_timer_service: InternalTimerService,
runtime_context: RuntimeContext):
self._internal_timer_service = internal_timer_service
self._runtime_context = runtime_context
def get_current_processing_time(self) -> int:
return self._internal_timer_service.current_processing_time()
def get_runtime_context(self) -> RuntimeContext:
return self._runtime_context
class WindowMergeFunction(MergingWindowSet.MergeFunction[W]):
def __init__(self,
window_operator: 'WindowOperator'):
self._window_assigner = window_operator.window_assigner
self._internal_timer_service = window_operator.internal_timer_service
self._allowed_lateness = window_operator.allowed_lateness
self._trigger_context = window_operator.trigger_context
self._window_merging_state = window_operator.window_merging_state
self._user_key_selector = window_operator.user_key_selector
self.delete_cleanup_timer = window_operator.delete_cleanup_timer
self.key = None
def merge(self,
merge_result: W,
merged_windows: Collection[W],
state_window_result: W,
merged_state_windows: Collection[W]):
if self._window_assigner.is_event_time() and \
merge_result.max_timestamp() + self._allowed_lateness <= \
self._internal_timer_service.current_watermark():
raise Exception("The end timestamp of an event-time window cannot become earlier than "
"the current watermark by merging. Current watermark: %d window: %s" %
(self._internal_timer_service.current_watermark(), merge_result))
elif not self._window_assigner.is_event_time():
current_processing_time = self._internal_timer_service.current_processing_time()
if merge_result.max_timestamp() <= current_processing_time:
raise Exception("The end timestamp of a processing-time window cannot become "
"earlier than the current processing time by merging. Current "
"processing time: %d window: %s" %
(current_processing_time, merge_result))
self._trigger_context.user_key = self._user_key_selector(self.key)
self._trigger_context.window = merge_result
self._trigger_context.on_merge(merged_windows)
for m in merged_windows:
self._trigger_context.window = m
self._trigger_context.clear()
self.delete_cleanup_timer(m)
self._window_merging_state.merge_namespaces(state_window_result, merged_state_windows)
class WindowOperator(object):
LATE_ELEMENTS_DROPPED_METRIC_NAME = "numLateRecordsDropped"
def __init__(self,
window_assigner: WindowAssigner,
keyed_state_backend,
user_key_selector,
window_state_descriptor: StateDescriptor,
window_function: InternalWindowFunction,
trigger: Trigger,
allowed_lateness: int,
late_data_output_tag: Optional[OutputTag]):
self.window_assigner = window_assigner
self.keyed_state_backend = keyed_state_backend
self.user_key_selector = user_key_selector
self.window_state_descriptor = window_state_descriptor
self.window_function = window_function
self.trigger = trigger
self.allowed_lateness = allowed_lateness
self.late_data_output_tag = late_data_output_tag
self.num_late_records_dropped = None
self.internal_timer_service = None # type: InternalTimerService
self.trigger_context = None # type: Context
self.process_context = None # type: WindowContext
self.window_assigner_context = None # type: WindowAssignerContext
self.window_state = None # type: InternalAppendingState
self.window_merging_state = None # type: InternalMergingState
self.merging_sets_state = None
self.merge_function = None # type: WindowMergeFunction
def open(self, runtime_context: RuntimeContext, internal_timer_service: InternalTimerService):
self.window_function.open(runtime_context)
self.num_late_records_dropped = runtime_context.get_metrics_group().counter(
self.LATE_ELEMENTS_DROPPED_METRIC_NAME)
self.internal_timer_service = internal_timer_service
self.trigger_context = Context(runtime_context, internal_timer_service, self.trigger)
self.process_context = WindowContext(
self.window_assigner,
runtime_context,
self.window_function,
self.internal_timer_service)
self.window_assigner_context = WindowAssignerContext(
self.internal_timer_service,
runtime_context)
        # create (or restore) the state that holds the actual window contents
        # NOTE - the state may be None in the case of an overriding evicting window operator
if self.window_state_descriptor is not None:
self.window_state = get_or_create_keyed_state(
runtime_context, self.window_state_descriptor)
if isinstance(self.window_assigner, MergingWindowAssigner):
if isinstance(self.window_state, InternalMergingState):
self.window_merging_state = self.window_state
if hasattr(self.keyed_state_backend, 'namespace_coder'):
window_coder = self.keyed_state_backend.namespace_coder
self.merging_sets_state = self.keyed_state_backend.get_map_state(
"merging-window-set", window_coder, window_coder)
else:
state_descriptor = MapStateDescriptor(
"merging-window-set",
PickledBytesTypeInfo(),
PickledBytesTypeInfo())
self.merging_sets_state = self.keyed_state_backend.get_map_state(state_descriptor)
self.merge_function = WindowMergeFunction(self)
def close(self):
self.window_function.close()
self.trigger_context = None
self.process_context = None
self.window_assigner_context = None
def process_element(self, value, timestamp: int):
element_windows = self.window_assigner.assign_windows(
value, timestamp, self.window_assigner_context)
is_skipped_element = True
key = self.keyed_state_backend.get_current_key()
self.merge_function.key = key
if isinstance(self.window_assigner, MergingWindowAssigner):
merging_windows = self.get_merging_window_set()
for window in element_windows:
actual_window = merging_windows.add_window(window, self.merge_function)
if self.is_window_late(actual_window):
merging_windows.retire_window(actual_window)
continue
is_skipped_element = False
state_window = merging_windows.get_state_window(actual_window)
if state_window is None:
raise Exception("Window %s is not in in-flight window set." % state_window)
self.window_state.set_current_namespace(state_window)
self.window_state.add(value)
self.trigger_context.user_key = self.user_key_selector(key)
self.trigger_context.window = actual_window
trigger_result = self.trigger_context.on_element(value, timestamp)
if trigger_result.is_fire():
contents = self.window_state.get()
                    # for list state, the iterable will never be None
if isinstance(self.window_state, ListState):
contents = [i for i in contents]
if len(contents) == 0:
contents = None
if contents is None:
continue
yield from self.emit_window_contents(actual_window, contents)
if trigger_result.is_purge():
self.window_state.clear()
self.register_cleanup_timer(actual_window)
merging_windows.persist()
else:
for window in element_windows:
if self.is_window_late(window):
continue
is_skipped_element = False
self.window_state.set_current_namespace(window)
self.window_state.add(value)
self.trigger_context.user_key = self.user_key_selector(key)
self.trigger_context.window = window
trigger_result = self.trigger_context.on_element(value, timestamp)
if trigger_result.is_fire():
contents = self.window_state.get()
                    # for list state, the iterable will never be None
if isinstance(self.window_state, ListState):
contents = [i for i in contents]
if len(contents) == 0:
contents = None
if contents is None:
continue
yield from self.emit_window_contents(window, contents)
if trigger_result.is_purge():
self.window_state.clear()
self.register_cleanup_timer(window)
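        # A record is side-outputted (or counted as dropped) only if it could not be
        # added to any window at all and it is already behind the watermark by more
        # than the allowed lateness.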
if is_skipped_element and self.is_element_late(value, timestamp):
if self.late_data_output_tag is not None:
yield self.late_data_output_tag, value
else:
self.num_late_records_dropped.inc()
def on_event_time(self, timestamp, key, namespace) -> None:
self.trigger_context.user_key = self.user_key_selector(key)
self.trigger_context.window = namespace
if isinstance(self.window_assigner, MergingWindowAssigner):
merging_windows = self.get_merging_window_set()
state_window = merging_windows.get_state_window(self.trigger_context.window)
if state_window is None:
                # Timer firing for a non-existent window; this can only happen if a
                # trigger did not clean up timers. We have already cleared the merging
                # window and therefore the Trigger state, so there is nothing to do.
return
else:
self.window_state.set_current_namespace(state_window)
else:
self.window_state.set_current_namespace(self.trigger_context.window)
merging_windows = None
trigger_result = self.trigger_context.on_event_time(timestamp)
if trigger_result.is_fire():
contents = self.window_state.get()
            # for list state, the iterable will never be None
if isinstance(self.window_state, ListState):
contents = [i for i in contents]
if len(contents) == 0:
contents = None
if contents is not None:
yield from self.emit_window_contents(self.trigger_context.window, contents)
if trigger_result.is_purge():
self.window_state.clear()
if self.window_assigner.is_event_time() and self.is_cleanup_time(
self.trigger_context.window, timestamp):
self.clear_all_state(self.trigger_context.window, self.window_state, merging_windows)
if merging_windows is not None:
merging_windows.persist()
def on_processing_time(self, timestamp, key, namespace):
self.trigger_context.user_key = self.user_key_selector(key)
self.trigger_context.window = namespace
if isinstance(self.window_assigner, MergingWindowAssigner):
merging_windows = self.get_merging_window_set()
state_window = merging_windows.get_state_window(self.trigger_context.window)
if state_window is None:
                # Timer firing for a non-existent window; this can only happen if a
                # trigger did not clean up timers. We have already cleared the merging
                # window and therefore the Trigger state, so there is nothing to do.
return
else:
self.window_state.set_current_namespace(state_window)
else:
self.window_state.set_current_namespace(self.trigger_context.window)
merging_windows = None
trigger_result = self.trigger_context.on_processing_time(timestamp)
if trigger_result.is_fire():
contents = self.window_state.get()
            # for list state, the iterable will never be None
if isinstance(self.window_state, ListState):
contents = [i for i in contents]
if len(contents) == 0:
contents = None
if contents is not None:
yield from self.emit_window_contents(self.trigger_context.window, contents)
if trigger_result.is_purge():
self.window_state.clear()
if not self.window_assigner.is_event_time() and self.is_cleanup_time(
self.trigger_context.window, timestamp):
self.clear_all_state(self.trigger_context.window, self.window_state, merging_windows)
if merging_windows is not None:
merging_windows.persist()
def get_merging_window_set(self) -> MergingWindowSet:
return MergingWindowSet(
typing.cast(MergingWindowAssigner[T, W], self.window_assigner),
self.merging_sets_state)
def cleanup_time(self, window) -> int:
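        # Analogous to the Java WindowOperator: the cleanup time is max_timestamp plus
        # the allowed lateness; if that sum would overflow on the Java side,
        # MAX_LONG_VALUE is returned instead, which register_cleanup_timer and
        # delete_cleanup_timer treat as "no cleanup timer".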
if self.window_assigner.is_event_time():
time = window.max_timestamp() + self.allowed_lateness
if time >= window.max_timestamp():
return time
else:
return MAX_LONG_VALUE
else:
return window.max_timestamp()
def is_cleanup_time(self, window, time) -> bool:
return time == self.cleanup_time(window)
def register_cleanup_timer(self, window) -> None:
cleanup_time = self.cleanup_time(window)
if cleanup_time == MAX_LONG_VALUE:
return
if self.window_assigner.is_event_time():
self.trigger_context.register_event_time_timer(cleanup_time)
else:
self.trigger_context.register_processing_time_timer(cleanup_time)
def delete_cleanup_timer(self, window) -> None:
cleanup_time = self.cleanup_time(window)
if cleanup_time == MAX_LONG_VALUE:
return
if self.window_assigner.is_event_time():
self.trigger_context.delete_event_time_timer(cleanup_time)
else:
self.trigger_context.delete_processing_time_timer(cleanup_time)
def is_window_late(self, window) -> bool:
return self.window_assigner.is_event_time() and \
self.cleanup_time(window) <= self.internal_timer_service.current_watermark()
def is_element_late(self, value, timestamp) -> bool:
return self.window_assigner.is_event_time() and timestamp + self.allowed_lateness <= \
self.internal_timer_service.current_watermark()
def clear_all_state(
self, window, window_state: AppendingState, merging_windows: MergingWindowSet):
window_state.clear()
self.trigger_context.clear()
self.process_context.window = window
self.process_context.clear()
if merging_windows is not None:
merging_windows.retire_window(window)
merging_windows.persist()
def emit_window_contents(self, window, contents) -> Iterable:
self.process_context.window = window
return self.window_function.process(
self.trigger_context.user_key, window, self.process_context, contents)
| 25,132 | 43.483186 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/embedded/side_output_context.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Dict
from pyflink.fn_execution.embedded.converters import from_type_info_proto, DataConverter
class SideOutputContext(object):
def __init__(self, j_side_output_context):
self._j_side_output_context = j_side_output_context
self._side_output_converters = (
{tag_id: from_type_info_proto(_parse_type_info_proto(payload))
for tag_id, payload in
j_side_output_context.getAllSideOutputTypeInfoPayloads().items()}
) # type: Dict[str, DataConverter]
def collect(self, tag_id: str, record):
try:
self._j_side_output_context.collectSideOutputById(
tag_id,
self._side_output_converters[tag_id].to_external(record))
except KeyError:
raise Exception("Unknown OutputTag id {0}, supported OutputTag ids are {1}".format(
tag_id, list(self._side_output_converters.keys())))
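    # Hypothetical usage: forward a record to the side output that was declared with
    # tag id "late-data"; the id must match one of the ids shipped in the Java context,
    # otherwise collect() raises:
    #
    #     side_output_context.collect("late-data", row)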
def _parse_type_info_proto(type_info_payload):
from pyflink.fn_execution import flink_fn_execution_pb2
type_info = flink_fn_execution_pb2.TypeInfo()
type_info.ParseFromString(type_info_payload)
return type_info
| 2,127 | 43.333333 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/embedded/state_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC
from typing import List, Iterable, Tuple, Dict, Collection
from pyflink.datastream import ReduceFunction, AggregateFunction
from pyflink.datastream.state import (T, IN, OUT, V, K, State)
from pyflink.fn_execution.embedded.converters import (DataConverter, DictDataConverter,
ListDataConverter)
from pyflink.fn_execution.internal_state import (InternalValueState, InternalKvState,
InternalListState, InternalReducingState,
InternalAggregatingState, InternalMapState,
N, InternalReadOnlyBroadcastState,
InternalBroadcastState)
class StateImpl(State, ABC):
def __init__(self,
state,
value_converter: DataConverter):
self._state = state
self._value_converter = value_converter
def clear(self):
self._state.clear()
class KeyedStateImpl(StateImpl, InternalKvState, ABC):
def __init__(self,
state,
value_converter: DataConverter,
window_converter: DataConverter = None):
super(KeyedStateImpl, self).__init__(state, value_converter)
self._window_converter = window_converter
def set_current_namespace(self, namespace) -> None:
j_window = self._window_converter.to_external(namespace)
self._state.setCurrentNamespace(j_window)
class ValueStateImpl(KeyedStateImpl, InternalValueState):
def __init__(self,
value_state,
value_converter: DataConverter,
window_converter: DataConverter = None):
super(ValueStateImpl, self).__init__(value_state, value_converter, window_converter)
def value(self) -> T:
return self._value_converter.to_internal(self._state.value())
def update(self, value: T) -> None:
self._state.update(self._value_converter.to_external(value))
class ListStateImpl(KeyedStateImpl, InternalListState):
def __init__(self,
list_state,
value_converter: ListDataConverter,
window_converter: DataConverter = None):
super(ListStateImpl, self).__init__(list_state, value_converter, window_converter)
self._element_converter = value_converter._field_converter
def update(self, values: List[T]) -> None:
self._state.update(self._value_converter.to_external(values))
def add_all(self, values: List[T]) -> None:
self._state.addAll(self._value_converter.to_external(values))
def get(self) -> OUT:
states = self._value_converter.to_internal(self._state.get())
if states:
yield from states
def add(self, value: IN) -> None:
self._state.add(self._element_converter.to_external(value))
def merge_namespaces(self, target: N, sources: Collection[N]) -> None:
j_target = self._window_converter.to_external(target)
j_sources = [self._window_converter.to_external(window) for window in sources]
self._state.mergeNamespaces(j_target, j_sources)
class ReducingStateImpl(KeyedStateImpl, InternalReducingState):
def __init__(self,
value_state,
value_converter: DataConverter,
reduce_function: ReduceFunction,
window_converter: DataConverter = None):
super(ReducingStateImpl, self).__init__(value_state, value_converter, window_converter)
self._reduce_function = reduce_function
def get(self) -> OUT:
return self._value_converter.to_internal(self._state.value())
def add(self, value: IN) -> None:
if value is None:
self.clear()
else:
current_value = self.get()
if current_value is None:
reduce_value = value
else:
reduce_value = self._reduce_function.reduce(current_value, value)
self._state.update(self._value_converter.to_external(reduce_value))
def merge_namespaces(self, target: N, sources: Collection[N]) -> None:
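        # Fold the values of all source namespaces into one value with the user's
        # ReduceFunction, clearing each source namespace along the way, then store the
        # result under the target namespace.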
merged = None
for source in sources:
self.set_current_namespace(source)
source_state = self.get()
if source_state is None:
continue
self.clear()
if merged is None:
merged = source_state
else:
merged = self._reduce_function.reduce(merged, source_state)
if merged is not None:
self.set_current_namespace(target)
self._state.update(self._value_converter.to_external(merged))
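# Illustrative sketch: drives ReducingStateImpl against duck-typed stand-ins for the
# embedded Java value state and the data converter, summing integers. All helper
# classes below are hypothetical test doubles, not Flink APIs.
def _example_reducing_state_sum():
    class _FakeJavaValueState(object):
        def __init__(self):
            self._value = None
        def value(self):
            return self._value
        def update(self, value):
            self._value = value
        def clear(self):
            self._value = None
    class _IdentityConverter(object):
        # stands in for a DataConverter; both directions are the identity
        def to_internal(self, value):
            return value
        def to_external(self, value):
            return value
    class _Sum(ReduceFunction):
        def reduce(self, value1, value2):
            return value1 + value2
    state = ReducingStateImpl(_FakeJavaValueState(), _IdentityConverter(), _Sum())
    state.add(3)
    state.add(4)
    return state.get()  # 7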
class AggregatingStateImpl(KeyedStateImpl, InternalAggregatingState):
def __init__(self,
value_state,
value_converter,
agg_function: AggregateFunction,
window_converter: DataConverter = None):
super(AggregatingStateImpl, self).__init__(value_state, value_converter, window_converter)
self._agg_function = agg_function
def get(self) -> OUT:
accumulator = self._value_converter.to_internal(self._state.value())
if accumulator is None:
return None
else:
return self._agg_function.get_result(accumulator)
def add(self, value: IN) -> None:
if value is None:
self.clear()
else:
accumulator = self._value_converter.to_internal(self._state.value())
if accumulator is None:
accumulator = self._agg_function.create_accumulator()
accumulator = self._agg_function.add(value, accumulator)
self._state.update(self._value_converter.to_external(accumulator))
def merge_namespaces(self, target: N, sources: Collection[N]) -> None:
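        # Same folding scheme as ReducingStateImpl.merge_namespaces, but using the
        # user's AggregateFunction.merge() to combine the per-namespace values.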
merged = None
for source in sources:
self.set_current_namespace(source)
source_state = self.get()
if source_state is None:
continue
self.clear()
if merged is None:
merged = source_state
else:
merged = self._agg_function.merge(merged, source_state)
if merged is not None:
self.set_current_namespace(target)
self._state.update(self._value_converter.to_external(merged))
class MapStateImpl(KeyedStateImpl, InternalMapState):
def __init__(self,
map_state,
map_converter: DictDataConverter,
window_converter: DataConverter = None):
super(MapStateImpl, self).__init__(map_state, map_converter, window_converter)
self._k_converter = map_converter._key_converter
self._v_converter = map_converter._value_converter
def get(self, key: K) -> V:
return self._v_converter.to_internal(
self._state.get(self._k_converter.to_external(key)))
def put(self, key: K, value: V) -> None:
self._state.put(self._k_converter.to_external(key), self._v_converter.to_external(value))
def put_all(self, dict_value: Dict[K, V]) -> None:
self._state.putAll(self._value_converter.to_external(dict_value))
def remove(self, key: K) -> None:
self._state.remove(self._k_converter.to_external(key))
def contains(self, key: K) -> bool:
return self._state.contains(self._k_converter.to_external(key))
def items(self) -> Iterable[Tuple[K, V]]:
entries = self._state.entries()
if entries:
for entry in entries:
yield (self._k_converter.to_internal(entry.getKey()),
self._v_converter.to_internal(entry.getValue()))
def keys(self) -> Iterable[K]:
keys = self._state.keys()
if keys:
for k in keys:
yield self._k_converter.to_internal(k)
def values(self) -> Iterable[V]:
values = self._state.values()
if values:
for v in values:
yield self._v_converter.to_internal(v)
def is_empty(self) -> bool:
return self._state.isEmpty()
class ReadOnlyBroadcastStateImpl(StateImpl, InternalReadOnlyBroadcastState):
def __init__(self,
map_state,
map_converter: DictDataConverter):
super(ReadOnlyBroadcastStateImpl, self).__init__(map_state, map_converter)
self._k_converter = map_converter._key_converter
self._v_converter = map_converter._value_converter
def get(self, key: K) -> V:
return self._v_converter.to_internal(
self._state.get(self._k_converter.to_external(key)))
def contains(self, key: K) -> bool:
return self._state.contains(self._k_converter.to_external(key))
def items(self) -> Iterable[Tuple[K, V]]:
entries = self._state.entries()
for entry in entries:
yield (self._k_converter.to_internal(entry.getKey()),
self._v_converter.to_internal(entry.getValue()))
def keys(self) -> Iterable[K]:
for k in self._state.keys():
yield self._k_converter.to_internal(k)
def values(self) -> Iterable[V]:
for v in self._state.values():
yield self._v_converter.to_internal(v)
def is_empty(self) -> bool:
return self._state.isEmpty()
class BroadcastStateImpl(ReadOnlyBroadcastStateImpl, InternalBroadcastState):
def __init__(self,
map_state,
map_converter: DictDataConverter):
super(BroadcastStateImpl, self).__init__(map_state, map_converter)
self._map_converter = map_converter
self._k_converter = map_converter._key_converter
self._v_converter = map_converter._value_converter
def to_read_only_broadcast_state(self) -> InternalReadOnlyBroadcastState[K, V]:
return ReadOnlyBroadcastStateImpl(self._state, self._map_converter)
def put(self, key: K, value: V) -> None:
self._state.put(self._k_converter.to_external(key), self._v_converter.to_external(value))
def put_all(self, dict_value: Dict[K, V]) -> None:
self._state.putAll(self._value_converter.to_external(dict_value))
def remove(self, key: K) -> None:
self._state.remove(self._k_converter.to_external(key))
| 11,343 | 36.939799 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/embedded/timerservice_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream import TimerService
from pyflink.fn_execution.datastream.timerservice import InternalTimerService, N
class TimerServiceImpl(TimerService):
def __init__(self, j_timer_service):
self._j_timer_service = j_timer_service
def current_processing_time(self):
return self._j_timer_service.currentProcessingTime()
def current_watermark(self):
return self._j_timer_service.currentWatermark()
def register_processing_time_timer(self, timestamp: int):
self._j_timer_service.registerProcessingTimeTimer(timestamp)
def register_event_time_timer(self, timestamp: int):
self._j_timer_service.registerEventTimeTimer(timestamp)
def delete_processing_time_timer(self, timestamp: int):
self._j_timer_service.deleteProcessingTimeTimer(timestamp)
def delete_event_time_timer(self, timestamp: int):
self._j_timer_service.deleteEventTimeTimer(timestamp)
class InternalTimerServiceImpl(InternalTimerService[N]):
def __init__(self, j_timer_service, window_converter):
self._j_timer_service = j_timer_service
self._window_converter = window_converter
def current_processing_time(self):
return self._j_timer_service.currentProcessingTime()
def current_watermark(self):
return self._j_timer_service.currentWatermark()
def register_processing_time_timer(self, namespace: N, timestamp: int):
window = self._window_converter.to_external(namespace)
self._j_timer_service.registerProcessingTimeTimer(window, timestamp)
def register_event_time_timer(self, namespace: N, timestamp: int):
window = self._window_converter.to_external(namespace)
self._j_timer_service.registerEventTimeTimer(window, timestamp)
def delete_event_time_timer(self, namespace: N, timestamp: int):
window = self._window_converter.to_external(namespace)
self._j_timer_service.deleteEventTimeTimer(window, timestamp)
def delete_processing_time_timer(self, namespace: N, timestamp: int):
window = self._window_converter.to_external(namespace)
self._j_timer_service.deleteProcessingTimeTimer(window, timestamp)
| 3,150 | 43.380282 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/embedded/process_function.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC
from pyflink.datastream import (ProcessFunction, KeyedProcessFunction, CoProcessFunction,
KeyedCoProcessFunction, TimerService, TimeDomain)
from pyflink.datastream.functions import (BaseBroadcastProcessFunction, BroadcastProcessFunction,
KeyedBroadcastProcessFunction)
from pyflink.datastream.state import MapStateDescriptor, BroadcastState, ReadOnlyBroadcastState
from pyflink.fn_execution.datastream.embedded.state_impl import (ReadOnlyBroadcastStateImpl,
BroadcastStateImpl)
from pyflink.fn_execution.datastream.embedded.timerservice_impl import TimerServiceImpl
from pyflink.fn_execution.embedded.converters import from_type_info_proto, from_type_info
from pyflink.fn_execution.embedded.java_utils import to_java_state_descriptor
class InternalProcessFunctionContext(ProcessFunction.Context, CoProcessFunction.Context,
TimerService):
def __init__(self, j_context):
self._j_context = j_context
def timer_service(self) -> TimerService:
return self
def timestamp(self) -> int:
return self._j_context.timestamp()
def current_processing_time(self):
return self._j_context.currentProcessingTime()
def current_watermark(self):
return self._j_context.currentWatermark()
    def register_processing_time_timer(self, timestamp: int):
        raise Exception("Registering timers is only supported on a keyed stream.")
    def register_event_time_timer(self, timestamp: int):
        raise Exception("Registering timers is only supported on a keyed stream.")
    def delete_processing_time_timer(self, t: int):
        raise Exception("Deleting timers is only supported on a keyed stream.")
    def delete_event_time_timer(self, t: int):
        raise Exception("Deleting timers is only supported on a keyed stream.")
class InternalKeyedProcessFunctionContext(KeyedProcessFunction.Context,
KeyedCoProcessFunction.Context):
def __init__(self, j_context, key_type_info):
self._j_context = j_context
self._timer_service = TimerServiceImpl(self._j_context.timerService())
self._key_converter = from_type_info_proto(key_type_info)
def get_current_key(self):
return self._key_converter.to_internal(self._j_context.getCurrentKey())
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._j_context.timestamp()
class InternalKeyedProcessFunctionOnTimerContext(KeyedProcessFunction.OnTimerContext,
KeyedProcessFunction.Context,
KeyedCoProcessFunction.OnTimerContext,
KeyedCoProcessFunction.Context):
def __init__(self, j_timer_context, key_type_info):
self._j_timer_context = j_timer_context
self._timer_service = TimerServiceImpl(self._j_timer_context.timerService())
self._key_converter = from_type_info_proto(key_type_info)
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._j_timer_context.timestamp()
def time_domain(self) -> TimeDomain:
return TimeDomain(self._j_timer_context.timeDomain())
def get_current_key(self):
return self._key_converter.to_internal(self._j_timer_context.getCurrentKey())
class InternalWindowTimerContext(object):
def __init__(self, j_timer_context, key_type_info, window_converter):
self._j_timer_context = j_timer_context
self._key_converter = from_type_info_proto(key_type_info)
self._window_converter = window_converter
def timestamp(self) -> int:
return self._j_timer_context.timestamp()
def window(self):
return self._window_converter.to_internal(self._j_timer_context.getWindow())
def get_current_key(self):
return self._key_converter.to_internal(self._j_timer_context.getCurrentKey())
class InternalBaseBroadcastProcessFunctionContext(BaseBroadcastProcessFunction.Context, ABC):
def __init__(self, j_context, j_operator_state_backend):
self._j_context = j_context
self._j_operator_state_backend = j_operator_state_backend
def timestamp(self) -> int:
return self._j_context.timestamp()
def current_processing_time(self) -> int:
return self._j_context.currentProcessingTime()
def current_watermark(self) -> int:
return self._j_context.currentWatermark()
class InternalBroadcastProcessFunctionContext(InternalBaseBroadcastProcessFunctionContext,
BroadcastProcessFunction.Context):
def __init__(self, j_context, j_operator_state_backend):
super(InternalBroadcastProcessFunctionContext, self).__init__(
j_context, j_operator_state_backend)
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> BroadcastState:
return BroadcastStateImpl(
self._j_operator_state_backend.getBroadcastState(
to_java_state_descriptor(state_descriptor)),
from_type_info(state_descriptor.type_info))
class InternalBroadcastProcessFunctionReadOnlyContext(InternalBaseBroadcastProcessFunctionContext,
BroadcastProcessFunction.ReadOnlyContext):
def __init__(self, j_context, j_operator_state_backend):
super(InternalBroadcastProcessFunctionReadOnlyContext, self).__init__(
j_context, j_operator_state_backend)
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> ReadOnlyBroadcastState:
return ReadOnlyBroadcastStateImpl(
self._j_operator_state_backend.getBroadcastState(
to_java_state_descriptor(state_descriptor)),
from_type_info(state_descriptor.type_info))
class InternalKeyedBroadcastProcessFunctionContext(InternalBaseBroadcastProcessFunctionContext,
KeyedBroadcastProcessFunction.Context):
def __init__(self, j_context, j_operator_state_backend):
super(InternalKeyedBroadcastProcessFunctionContext, self).__init__(
j_context, j_operator_state_backend)
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> BroadcastState:
return BroadcastStateImpl(
self._j_operator_state_backend.getBroadcastState(
to_java_state_descriptor(state_descriptor)),
from_type_info(state_descriptor.type_info))
class InternalKeyedBroadcastProcessFunctionReadOnlyContext(
InternalBaseBroadcastProcessFunctionContext,
KeyedBroadcastProcessFunction.ReadOnlyContext
):
def __init__(self, j_context, key_type_info, j_operator_state_backend):
super(InternalKeyedBroadcastProcessFunctionReadOnlyContext, self).__init__(
j_context, j_operator_state_backend)
self._key_converter = from_type_info_proto(key_type_info)
self._timer_service = TimerServiceImpl(self._j_context.timerService())
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> ReadOnlyBroadcastState:
return ReadOnlyBroadcastStateImpl(
self._j_operator_state_backend.getBroadcastState(
to_java_state_descriptor(state_descriptor)),
from_type_info(state_descriptor.type_info))
def timer_service(self) -> TimerService:
return self._timer_service
def get_current_key(self):
return self._key_converter.to_internal(self._j_context.getCurrentKey())
class InternalKeyedBroadcastProcessFunctionOnTimerContext(
InternalBaseBroadcastProcessFunctionContext,
KeyedBroadcastProcessFunction.OnTimerContext,
):
def __init__(self, j_timer_context, key_type_info, j_operator_state_backend):
super(InternalKeyedBroadcastProcessFunctionOnTimerContext, self).__init__(
j_timer_context, j_operator_state_backend)
self._timer_service = TimerServiceImpl(self._j_context.timerService())
self._key_converter = from_type_info_proto(key_type_info)
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> ReadOnlyBroadcastState:
return ReadOnlyBroadcastStateImpl(
self._j_operator_state_backend.getBroadcastState(
to_java_state_descriptor(state_descriptor)),
from_type_info(state_descriptor.type_info))
def current_processing_time(self) -> int:
return self._timer_service.current_processing_time()
def current_watermark(self) -> int:
return self._timer_service.current_watermark()
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._j_context.timestamp()
def time_domain(self) -> TimeDomain:
return TimeDomain(self._j_context.timeDomain())
def get_current_key(self):
return self._key_converter.to_internal(self._j_context.getCurrentKey())
| 10,188 | 42.357447 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/embedded/runtime_context.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream import RuntimeContext
from pyflink.datastream.state import (AggregatingStateDescriptor, AggregatingState,
ReducingStateDescriptor, ReducingState, MapStateDescriptor,
MapState, ListStateDescriptor, ListState,
ValueStateDescriptor, ValueState)
from pyflink.fn_execution.embedded.state_impl import KeyedStateBackend
from pyflink.fn_execution.metrics.embedded.metric_impl import MetricGroupImpl
from pyflink.metrics import MetricGroup
class StreamingRuntimeContext(RuntimeContext):
def __init__(self, runtime_context, job_parameters):
self._runtime_context = runtime_context
self._job_parameters = job_parameters
self._keyed_state_backend = None # type: KeyedStateBackend
def get_task_name(self) -> str:
"""
Returns the name of the task in which the UDF runs, as assigned during plan construction.
"""
return self._runtime_context.getTaskName()
def get_number_of_parallel_subtasks(self) -> int:
"""
Gets the parallelism with which the parallel task runs.
"""
return self._runtime_context.getNumberOfParallelSubtasks()
def get_max_number_of_parallel_subtasks(self) -> int:
"""
Gets the max-parallelism with which the parallel task runs.
"""
return self._runtime_context.getMaxNumberOfParallelSubtasks()
def get_index_of_this_subtask(self) -> int:
"""
Gets the number of this parallel subtask. The numbering starts from 0 and goes up to
parallelism-1 (parallelism as returned by
:func:`~RuntimeContext.get_number_of_parallel_subtasks`).
"""
return self._runtime_context.getIndexOfThisSubtask()
def get_attempt_number(self) -> int:
"""
Gets the attempt number of this parallel subtask. First attempt is numbered 0.
"""
return self._runtime_context.getAttemptNumber()
def get_task_name_with_subtasks(self) -> str:
"""
Returns the name of the task, appended with the subtask indicator, such as "MyTask (3/6)",
where 3 would be (:func:`~RuntimeContext.get_index_of_this_subtask` + 1), and 6 would be
:func:`~RuntimeContext.get_number_of_parallel_subtasks`.
"""
return self._runtime_context.getTaskNameWithSubtasks()
def get_job_parameter(self, key: str, default_value: str):
"""
Gets the global job parameter value associated with the given key as a string.
"""
return self._job_parameters[key] if key in self._job_parameters else default_value
def get_metrics_group(self) -> MetricGroup:
return MetricGroupImpl(self._runtime_context.getMetricGroup())
def get_state(self, state_descriptor: ValueStateDescriptor) -> ValueState:
return self._keyed_state_backend.get_value_state(state_descriptor)
def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListState:
return self._keyed_state_backend.get_list_state(state_descriptor)
def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapState:
return self._keyed_state_backend.get_map_state(state_descriptor)
def get_reducing_state(self, state_descriptor: ReducingStateDescriptor) -> ReducingState:
return self._keyed_state_backend.get_reducing_state(state_descriptor)
def get_aggregating_state(self,
state_descriptor: AggregatingStateDescriptor) -> AggregatingState:
return self._keyed_state_backend.get_aggregating_state(state_descriptor)
def set_keyed_state_backend(self, keyed_state_backend: KeyedStateBackend):
self._keyed_state_backend = keyed_state_backend
def get_keyed_state_backend(self):
return self._keyed_state_backend
@staticmethod
def of(runtime_context, job_parameters):
return StreamingRuntimeContext(runtime_context, job_parameters)
| 4,989 | 45.203704 | 98 |
py
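StreamingRuntimeContext is what a user function sees as its RuntimeContext inside open(). A minimal sketch of how the state and job-parameter accessors above are typically used from a keyed user function, assuming the public DataStream API; the descriptor name, threshold parameter, and class are illustrative:
from pyflink.common import Types
from pyflink.datastream import KeyedProcessFunction, RuntimeContext
from pyflink.datastream.state import ValueStateDescriptor


class CountPerKey(KeyedProcessFunction):

    def open(self, runtime_context: RuntimeContext):
        # Keyed state handle, resolved through the keyed state backend set on the context.
        self.count_state = runtime_context.get_state(
            ValueStateDescriptor("count", Types.LONG()))
        # Falls back to the default when the key is not among the global job parameters.
        self.threshold = int(runtime_context.get_job_parameter("threshold", "10"))

    def process_element(self, value, ctx):
        count = (self.count_state.value() or 0) + 1
        self.count_state.update(count)
        if count >= self.threshold:
            yield ctx.get_current_key(), count
            self.count_state.clear()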
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/embedded/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/datastream/embedded/operations.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream import OutputTag
from pyflink.datastream.window import WindowOperationDescriptor
from pyflink.fn_execution import pickle
from pyflink.fn_execution.coders import TimeWindowCoder, CountWindowCoder
from pyflink.fn_execution.datastream import operations
from pyflink.fn_execution.datastream.embedded.process_function import (
InternalProcessFunctionContext, InternalKeyedProcessFunctionContext,
InternalKeyedProcessFunctionOnTimerContext, InternalWindowTimerContext,
InternalBroadcastProcessFunctionContext, InternalBroadcastProcessFunctionReadOnlyContext,
InternalKeyedBroadcastProcessFunctionContext,
InternalKeyedBroadcastProcessFunctionReadOnlyContext,
InternalKeyedBroadcastProcessFunctionOnTimerContext)
from pyflink.fn_execution.datastream.embedded.runtime_context import StreamingRuntimeContext
from pyflink.fn_execution.datastream.embedded.side_output_context import SideOutputContext
from pyflink.fn_execution.datastream.embedded.timerservice_impl import InternalTimerServiceImpl
from pyflink.fn_execution.datastream.window.window_operator import WindowOperator
from pyflink.fn_execution.embedded.converters import (TimeWindowConverter, CountWindowConverter,
GlobalWindowConverter)
from pyflink.fn_execution.embedded.state_impl import KeyedStateBackend
class OneInputOperation(operations.OneInputOperation):
def __init__(self, open_func, close_func, process_element_func, on_timer_func=None):
self._open_func = open_func
self._close_func = close_func
self._process_element_func = process_element_func
self._on_timer_func = on_timer_func
def open(self) -> None:
self._open_func()
def close(self) -> None:
self._close_func()
def process_element(self, value):
return self._process_element_func(value)
def on_timer(self, timestamp):
if self._on_timer_func:
return self._on_timer_func(timestamp)
class TwoInputOperation(operations.TwoInputOperation):
def __init__(self, open_func, close_func, process_element_func1, process_element_func2,
on_timer_func=None):
self._open_func = open_func
self._close_func = close_func
self._process_element_func1 = process_element_func1
self._process_element_func2 = process_element_func2
self._on_timer_func = on_timer_func
def open(self) -> None:
self._open_func()
def close(self) -> None:
self._close_func()
def process_element1(self, value):
return self._process_element_func1(value)
def process_element2(self, value):
return self._process_element_func2(value)
def on_timer(self, timestamp):
if self._on_timer_func:
return self._on_timer_func(timestamp)
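# Illustrative driver (not part of the module): the wrappers above only forward to the
# closures built in extract_process_function below, so their life cycle is easy to
# exercise in isolation with stand-in functions.
def _demo_one_input_operation():
    def _open():
        print("opened")

    def _close():
        print("closed")

    def _process(value):
        yield value * 2

    op = OneInputOperation(_open, _close, _process)
    op.open()                                # -> "opened"
    print(list(op.process_element(21)))      # -> [42]
    print(op.on_timer(0))                    # no timer callback registered -> None
    op.close()                               # -> "closed"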
def extract_process_function(
user_defined_function_proto, j_runtime_context, j_function_context, j_timer_context,
j_side_output_context, job_parameters, j_keyed_state_backend, j_operator_state_backend):
from pyflink.fn_execution import flink_fn_execution_pb2
UserDefinedDataStreamFunction = flink_fn_execution_pb2.UserDefinedDataStreamFunction
user_defined_func = pickle.loads(user_defined_function_proto.payload)
func_type = user_defined_function_proto.function_type
runtime_context = StreamingRuntimeContext.of(j_runtime_context, job_parameters)
if j_side_output_context:
side_output_context = SideOutputContext(j_side_output_context)
def process_func(values):
if values is None:
return
for value in values:
if isinstance(value, tuple) and isinstance(value[0], OutputTag):
output_tag = value[0] # type: OutputTag
side_output_context.collect(output_tag.tag_id, value[1])
else:
yield value
else:
def process_func(values):
if values is None:
return
yield from values
def open_func():
if hasattr(user_defined_func, "open"):
user_defined_func.open(runtime_context)
def close_func():
if hasattr(user_defined_func, "close"):
user_defined_func.close()
if func_type == UserDefinedDataStreamFunction.PROCESS:
function_context = InternalProcessFunctionContext(j_function_context)
process_element = user_defined_func.process_element
def process_element_func(value):
yield from process_func(process_element(value, function_context))
return OneInputOperation(open_func, close_func, process_element_func)
elif func_type == UserDefinedDataStreamFunction.KEYED_PROCESS:
function_context = InternalKeyedProcessFunctionContext(
j_function_context, user_defined_function_proto.key_type_info)
timer_context = InternalKeyedProcessFunctionOnTimerContext(
j_timer_context, user_defined_function_proto.key_type_info)
keyed_state_backend = KeyedStateBackend(
function_context,
j_keyed_state_backend)
runtime_context.set_keyed_state_backend(keyed_state_backend)
process_element = user_defined_func.process_element
on_timer = user_defined_func.on_timer
def process_element_func(value):
yield from process_func(process_element(value[1], function_context))
def on_timer_func(timestamp):
yield from process_func(on_timer(timestamp, timer_context))
return OneInputOperation(open_func, close_func, process_element_func, on_timer_func)
elif func_type == UserDefinedDataStreamFunction.CO_PROCESS:
function_context = InternalProcessFunctionContext(j_function_context)
process_element1 = user_defined_func.process_element1
process_element2 = user_defined_func.process_element2
def process_element_func1(value):
yield from process_func(process_element1(value, function_context))
def process_element_func2(value):
yield from process_func(process_element2(value, function_context))
return TwoInputOperation(
open_func, close_func, process_element_func1, process_element_func2)
elif func_type == UserDefinedDataStreamFunction.CO_BROADCAST_PROCESS:
broadcast_ctx = InternalBroadcastProcessFunctionContext(
j_function_context, j_operator_state_backend)
read_only_broadcast_ctx = InternalBroadcastProcessFunctionReadOnlyContext(
j_function_context, j_operator_state_backend)
process_element = user_defined_func.process_element
process_broadcast_element = user_defined_func.process_broadcast_element
def process_element_func1(value):
yield from process_func(process_element(value, read_only_broadcast_ctx))
def process_element_func2(value):
yield from process_func(process_broadcast_element(value, broadcast_ctx))
return TwoInputOperation(
open_func, close_func, process_element_func1, process_element_func2)
elif func_type == UserDefinedDataStreamFunction.KEYED_CO_PROCESS:
function_context = InternalKeyedProcessFunctionContext(
j_function_context, user_defined_function_proto.key_type_info)
timer_context = InternalKeyedProcessFunctionOnTimerContext(
j_timer_context, user_defined_function_proto.key_type_info)
keyed_state_backend = KeyedStateBackend(
function_context,
j_keyed_state_backend)
runtime_context.set_keyed_state_backend(keyed_state_backend)
process_element1 = user_defined_func.process_element1
process_element2 = user_defined_func.process_element2
on_timer = user_defined_func.on_timer
def process_element_func1(value):
yield from process_func(process_element1(value[1], function_context))
def process_element_func2(value):
yield from process_func(process_element2(value[1], function_context))
def on_timer_func(timestamp):
yield from process_func(on_timer(timestamp, timer_context))
return TwoInputOperation(
open_func, close_func, process_element_func1, process_element_func2, on_timer_func)
elif func_type == UserDefinedDataStreamFunction.KEYED_CO_BROADCAST_PROCESS:
broadcast_ctx = InternalKeyedBroadcastProcessFunctionContext(
j_function_context, j_operator_state_backend)
read_only_broadcast_ctx = InternalKeyedBroadcastProcessFunctionReadOnlyContext(
j_function_context, user_defined_function_proto.key_type_info, j_operator_state_backend)
timer_context = InternalKeyedBroadcastProcessFunctionOnTimerContext(
j_timer_context, user_defined_function_proto.key_type_info, j_operator_state_backend)
keyed_state_backend = KeyedStateBackend(
read_only_broadcast_ctx,
j_keyed_state_backend)
runtime_context.set_keyed_state_backend(keyed_state_backend)
process_element = user_defined_func.process_element
process_broadcast_element = user_defined_func.process_broadcast_element
on_timer = user_defined_func.on_timer
def process_element_func1(value):
yield from process_func(process_element(value[1], read_only_broadcast_ctx))
def process_element_func2(value):
yield from process_func(process_broadcast_element(value, broadcast_ctx))
def on_timer_func(timestamp):
yield from on_timer(timestamp, timer_context)
return TwoInputOperation(
open_func, close_func, process_element_func1, process_element_func2, on_timer_func)
elif func_type == UserDefinedDataStreamFunction.WINDOW:
window_operation_descriptor = (
user_defined_func
) # type: WindowOperationDescriptor
def user_key_selector(normal_data):
return normal_data
window_assigner = window_operation_descriptor.assigner
window_trigger = window_operation_descriptor.trigger
allowed_lateness = window_operation_descriptor.allowed_lateness
late_data_output_tag = window_operation_descriptor.late_data_output_tag
window_state_descriptor = window_operation_descriptor.window_state_descriptor
internal_window_function = window_operation_descriptor.internal_window_function
window_serializer = window_operation_descriptor.window_serializer
window_coder = window_serializer._get_coder()
if isinstance(window_coder, TimeWindowCoder):
window_converter = TimeWindowConverter()
elif isinstance(window_coder, CountWindowCoder):
window_converter = CountWindowConverter()
else:
window_converter = GlobalWindowConverter()
internal_timer_service = InternalTimerServiceImpl(
j_timer_context.timerService(), window_converter)
function_context = InternalKeyedProcessFunctionContext(
j_function_context,
user_defined_function_proto.key_type_info)
window_timer_context = InternalWindowTimerContext(
j_timer_context,
user_defined_function_proto.key_type_info,
window_converter)
keyed_state_backend = KeyedStateBackend(
function_context,
j_keyed_state_backend,
j_function_context.getWindowSerializer(),
window_converter)
runtime_context.set_keyed_state_backend(keyed_state_backend)
window_operator = WindowOperator(
window_assigner,
keyed_state_backend,
user_key_selector,
window_state_descriptor,
internal_window_function,
window_trigger,
allowed_lateness,
late_data_output_tag)
def open_func():
window_operator.open(runtime_context, internal_timer_service)
def close_func():
window_operator.close()
def process_element_func(value):
yield from process_func(
window_operator.process_element(value[1], function_context.timestamp()))
if window_assigner.is_event_time():
def on_timer_func(timestamp):
window = window_timer_context.window()
key = window_timer_context.get_current_key()
yield from process_func(window_operator.on_event_time(timestamp, key, window))
else:
def on_timer_func(timestamp):
window = window_timer_context.window()
key = window_timer_context.get_current_key()
yield from process_func(window_operator.on_processing_time(timestamp, key, window))
return OneInputOperation(open_func, close_func, process_element_func, on_timer_func)
else:
raise Exception("Unknown function type {0}.".format(func_type))
| 13,934 | 42.142415 | 100 |
py
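The side-output branch of process_func above routes (OutputTag, value) tuples yielded by a user function to SideOutputContext and passes everything else downstream. From the user's side that contract looks like the following sketch (public DataStream API; the tag name, types, and function are illustrative):
from pyflink.common import Types
from pyflink.datastream import OutputTag, ProcessFunction

late_tag = OutputTag("late-data", Types.STRING())


class SplitLate(ProcessFunction):

    def process_element(self, value, ctx):
        timestamp = ctx.timestamp()
        if timestamp is not None and timestamp < ctx.timer_service().current_watermark():
            # Routed to the side output by the embedded runtime (see process_func above).
            yield late_tag, value
        else:
            # Emitted to the main output.
            yield value

# main = stream.process(SplitLate(), output_type=Types.STRING())
# late = main.get_side_output(late_tag)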
|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/window_process_function.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import abstractmethod, ABC
from typing import Generic, List, Iterable, Dict, Set
from pyflink.common import Row
from pyflink.common.constants import MAX_LONG_VALUE
from pyflink.datastream.state import MapState
from pyflink.fn_execution.table.window_assigner import WindowAssigner, PanedWindowAssigner, \
MergingWindowAssigner
from pyflink.fn_execution.table.window_context import Context, K, W
def join_row(left: List, right: List):
return Row(*(left + right))
class InternalWindowProcessFunction(Generic[K, W], ABC):
"""
The internal interface for functions that process over grouped windows.
"""
def __init__(self,
allowed_lateness: int,
window_assigner: WindowAssigner[W],
window_aggregator):
self._allowed_lateness = allowed_lateness
self._window_assigner = window_assigner
self._window_aggregator = window_aggregator
self._ctx = None # type: Context[K, W]
def open(self, ctx: Context[K, W]):
self._ctx = ctx
self._window_assigner.open(ctx)
def close(self):
pass
def is_cleanup_time(self, window: W, time: int) -> bool:
return time == self._cleanup_time(window)
def is_window_late(self, window: W) -> bool:
return self._window_assigner.is_event_time() and \
self._cleanup_time(window) <= self._ctx.current_watermark()
def _cleanup_time(self, window: W) -> int:
if self._window_assigner.is_event_time():
cleanup_time = window.max_timestamp() + self._allowed_lateness
if cleanup_time >= window.max_timestamp():
return cleanup_time
else:
return MAX_LONG_VALUE
else:
return window.max_timestamp()
@abstractmethod
def assign_state_namespace(self, input_row: List, timestamp: int) -> List[W]:
"""
Assigns the input element to the state namespaces into which it should be accumulated
or retracted.
:param input_row: The input element
:param timestamp: The timestamp of the element or the processing time (depends on the type
of assigner)
:return: The state namespace.
"""
pass
@abstractmethod
def assign_actual_windows(self, input_row: List, timestamp: int) -> List[W]:
"""
Assigns the input element to the actual windows on which the Trigger should fire.
:param input_row: The input element
:param timestamp: The timestamp of the element or the processing time (depends on the type
of assigner)
:return: The actual windows
"""
pass
@abstractmethod
def prepare_aggregate_accumulator_for_emit(self, window: W):
"""
Prepares the accumulator of the given window before emitting the final result. The accumulator
is stored in the state or will be created if there is no corresponding accumulator in state.
:param window: The window
"""
pass
@abstractmethod
def clean_window_if_needed(self, window: W, current_time: int):
"""
Cleans the given window if needed.
:param window: The window to cleanup
:param current_time: The current timestamp
"""
pass
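# Worked example (illustrative, not part of the module): a window's state lives until its
# cleanup time. In event time that is max_timestamp() + allowed_lateness (the guard below is
# a port of the Java long-overflow check); in processing time it is simply max_timestamp().
def _demo_cleanup_time():
    def cleanup_time(window_max_timestamp, allowed_lateness, is_event_time):
        # Mirrors InternalWindowProcessFunction._cleanup_time above.
        if is_event_time:
            cleanup = window_max_timestamp + allowed_lateness
            return cleanup if cleanup >= window_max_timestamp else MAX_LONG_VALUE
        return window_max_timestamp

    # A tumbling window [0, 60000) has max_timestamp 59999; with 5s allowed lateness its
    # state is kept until 64999, at which point is_cleanup_time() becomes true.
    print(cleanup_time(59_999, 5_000, True))    # 64999
    print(cleanup_time(59_999, 5_000, False))   # 59999 (cleaned at the end of the window)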
class GeneralWindowProcessFunction(InternalWindowProcessFunction[K, W]):
"""
The general implementation of InternalWindowProcessFunction. The WindowAssigner should be a
regular assigner that implements neither PanedWindowAssigner nor MergingWindowAssigner.
"""
def __init__(self,
allowed_lateness: int,
window_assigner: WindowAssigner[W],
window_aggregator):
super(GeneralWindowProcessFunction, self).__init__(
allowed_lateness, window_assigner, window_aggregator)
self._reuse_affected_windows = None # type: List[W]
def assign_state_namespace(self, input_row: List, timestamp: int) -> List[W]:
element_windows = self._window_assigner.assign_windows(input_row, timestamp)
self._reuse_affected_windows = []
for window in element_windows:
if not self.is_window_late(window):
self._reuse_affected_windows.append(window)
return self._reuse_affected_windows
def assign_actual_windows(self, input_row: List, timestamp: int) -> List[W]:
# the actual windows are the same as the affected windows, so reuse them
return self._reuse_affected_windows
def prepare_aggregate_accumulator_for_emit(self, window: W):
acc = self._ctx.get_window_accumulators(window)
if acc is None:
acc = self._window_aggregator.create_accumulators()
self._window_aggregator.set_accumulators(window, acc)
def clean_window_if_needed(self, window: W, current_time: int):
if self.is_cleanup_time(window, current_time):
self._ctx.clear_window_state(window)
self._window_aggregator.cleanup(window)
self._ctx.clear_trigger(window)
class PanedWindowProcessFunction(InternalWindowProcessFunction[K, W]):
"""
The implementation of InternalWindowProcessFunction for PanedWindowAssigner.
"""
def __init__(self,
allowed_lateness: int,
window_assigner: PanedWindowAssigner[W],
window_aggregator):
super(PanedWindowProcessFunction, self).__init__(
allowed_lateness, window_assigner, window_aggregator)
self._window_assigner = window_assigner
def assign_state_namespace(self, input_row: List, timestamp: int) -> List[W]:
pane = self._window_assigner.assign_pane(input_row, timestamp)
if not self._is_pane_late(pane):
return [pane]
else:
return []
def assign_actual_windows(self, input_row: List, timestamp: int) -> List[W]:
element_windows = self._window_assigner.assign_windows(input_row, timestamp)
actual_windows = []
for window in element_windows:
if not self.is_window_late(window):
actual_windows.append(window)
return actual_windows
def prepare_aggregate_accumulator_for_emit(self, window: W):
panes = self._window_assigner.split_into_panes(window)
acc = self._window_aggregator.create_accumulators()
# null namespace means use heap data views
self._window_aggregator.set_accumulators(None, acc)
for pane in panes:
pane_acc = self._ctx.get_window_accumulators(pane)
if pane_acc:
self._window_aggregator.merge(pane, pane_acc)
def clean_window_if_needed(self, window: W, current_time: int):
if self.is_cleanup_time(window, current_time):
panes = self._window_assigner.split_into_panes(window)
for pane in panes:
last_window = self._window_assigner.get_last_window(pane)
if window == last_window:
self._ctx.clear_window_state(pane)
self._ctx.clear_trigger(window)
def _is_pane_late(self, pane: W):
# a pane is late if the last window it belongs to is late
return self._window_assigner.is_event_time() and \
self.is_window_late(self._window_assigner.get_last_window(pane))
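# Illustrative sketch (not part of the module): a PanedWindowAssigner splits overlapping
# windows into non-overlapping panes so each element is accumulated exactly once and a
# window result is assembled by merging its panes. The arithmetic for a sliding window of
# size 10s with a 5s slide, without using the internal assigner classes:
def _demo_panes():
    def pane_of(timestamp, slide):
        # The pane is the smallest non-overlapping unit: [start, start + slide).
        start = timestamp - timestamp % slide
        return start, start + slide

    def windows_of(timestamp, size, slide):
        # All sliding windows [start, start + size) containing the timestamp.
        last_start = timestamp - timestamp % slide
        return [(start, start + size)
                for start in range(last_start, timestamp - size, -slide)]

    # An element at t=12000 is accumulated once into pane [10000, 15000) and contributes
    # to the sliding windows [10000, 20000) and [5000, 15000).
    print(pane_of(12_000, 5_000))             # (10000, 15000)
    print(windows_of(12_000, 10_000, 5_000))  # [(10000, 20000), (5000, 15000)]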
class MergeResultCollector(MergingWindowAssigner.MergeCallback):
def __init__(self):
self.merge_results = {} # type: Dict[W, Iterable[W]]
def merge(self, merge_result: W, to_be_merged: Iterable[W]):
self.merge_results[merge_result] = to_be_merged
class MergingWindowProcessFunction(InternalWindowProcessFunction[K, W]):
"""
The implementation of InternalWindowProcessFunction for MergingWindowAssigner.
"""
def __init__(self,
allowed_lateness: int,
window_assigner: MergingWindowAssigner[W],
window_aggregator,
state_backend):
super(MergingWindowProcessFunction, self).__init__(
allowed_lateness, window_assigner, window_aggregator)
self._window_assigner = window_assigner
self._reuse_actual_windows = None # type: List
self._window_mapping = None # type: MapState
self._state_backend = state_backend
self._sorted_windows = None # type: List
from pyflink.fn_execution.state_impl import LRUCache
self._cached_sorted_windows = LRUCache(10000, None)
def open(self, ctx: Context[K, W]):
super(MergingWindowProcessFunction, self).open(ctx)
self._window_mapping = self._state_backend.get_map_state(
'session-window-mapping',
self._state_backend.namespace_coder,
self._state_backend.namespace_coder)
def assign_state_namespace(self, input_row: List, timestamp: int) -> List[W]:
element_windows = self._window_assigner.assign_windows(input_row, timestamp)
self._initialize_cache(self._ctx.current_key())
self._reuse_actual_windows = []
for window in element_windows:
# adding the new window might result in a merge, in that case the actualWindow
# is the merged window and we work with that. If we don't merge then
# actualWindow == window
actual_window = self._add_window(window)
# drop if the window is already late
if self.is_window_late(actual_window):
self._window_mapping.remove(actual_window)
self._sorted_windows.remove(actual_window)
else:
self._reuse_actual_windows.append(actual_window)
affected_windows = [self._window_mapping.get(actual)
for actual in self._reuse_actual_windows]
return affected_windows
def assign_actual_windows(self, input_row: List, timestamp: int) -> List[W]:
# the actual windows are calculated in assign_state_namespace
return self._reuse_actual_windows
def prepare_aggregate_accumulator_for_emit(self, window: W):
state_window = self._window_mapping.get(window)
acc = self._ctx.get_window_accumulators(state_window)
if acc is None:
acc = self._window_aggregator.create_accumulators()
self._window_aggregator.set_accumulators(state_window, acc)
def clean_window_if_needed(self, window: W, current_time: int):
if self.is_cleanup_time(window, current_time):
self._ctx.clear_trigger(window)
state_window = self._window_mapping.get(window)
self._ctx.clear_window_state(state_window)
# retire expired window
self._initialize_cache(self._ctx.current_key())
self._window_mapping.remove(window)
self._sorted_windows.remove(window)
def _initialize_cache(self, key):
tuple_key = tuple(key)
self._sorted_windows = self._cached_sorted_windows.get(tuple_key)
if self._sorted_windows is None:
self._sorted_windows = [k for k in self._window_mapping]
self._sorted_windows.sort()
self._cached_sorted_windows.put(tuple_key, self._sorted_windows)
def _add_window(self, new_window: W):
collector = MergeResultCollector()
self._window_assigner.merge_windows(new_window, self._sorted_windows, collector)
result_window = new_window
is_new_window_merged = False
# perform the merge
merge_results = collector.merge_results
for merge_result in merge_results:
merge_windows = merge_results[merge_result] # type: Set[W]
# if our new window is in the merged windows make the merge result the result window
try:
merge_windows.remove(new_window)
is_new_window_merged = True
result_window = merge_result
except KeyError:
pass
# if our new window is the same as a pre-existing window, nothing to do
if not merge_windows:
continue
# pick any of the merged windows and choose that window's state window
# as the state window for the merge result
merged_state_namespace = self._window_mapping.get(iter(merge_windows).__next__())
merged_state_windows = []
# figure out the state windows that we are merging
for merged_window in merge_windows:
res = self._window_mapping.get(merged_window)
if res is not None:
self._window_mapping.remove(merged_window)
self._sorted_windows.remove(merged_window)
# don't put the target state window into the merged windows
if res != merged_state_namespace:
merged_state_windows.append(res)
self._window_mapping.put(merge_result, merged_state_namespace)
self._sorted_windows.append(merge_result)
self._sorted_windows.sort()
# don't merge the new window itself, it never had any state associated with it
# i.e. if we are only merging one pre-existing window into itself
# without extending the pre-existing window
if not (len(merge_windows) == 1 and merge_result in merge_windows):
self._merge(
merge_result, merge_windows, merged_state_namespace, merged_state_windows)
# the new window created a new, self-contained window without merging
if len(merge_results) == 0 or result_window == new_window and not is_new_window_merged:
self._window_mapping.put(result_window, result_window)
self._sorted_windows.append(result_window)
self._sorted_windows.sort()
return result_window
def _merge(self, merge_result: W, merge_windows: Set[W], state_window_result: W,
state_windows_tobe_merged: Iterable[W]):
self._ctx.on_merge(merge_result, state_windows_tobe_merged)
# clear registered timers
for window in merge_windows:
self._ctx.clear_trigger(window)
self._ctx.delete_cleanup_timer(window)
# merge the merged state windows into the newly resulting state window
if state_windows_tobe_merged:
target_acc = self._ctx.get_window_accumulators(state_window_result)
if target_acc is None:
target_acc = self._window_aggregator.create_accumulators()
self._window_aggregator.set_accumulators(state_window_result, target_acc)
for window in state_windows_tobe_merged:
acc = self._ctx.get_window_accumulators(window)
if acc is not None:
self._window_aggregator.merge(window, acc)
# clear merged window
self._ctx.clear_window_state(window)
target_acc = self._window_aggregator.get_accumulators()
self._ctx.set_window_accumulators(state_window_result, target_acc)
| 16,087 | 41.336842 | 100 |
py
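MergingWindowProcessFunction above keeps a mapping from each (possibly merged) logical window to the state window whose accumulator actually holds the data, and re-sorts its cached window list after every merge. The interval arithmetic behind such a session merge can be sketched without the internal classes; this illustration assumes plain (start, end) tuples that are already extended by the session gap:
def merge_sessions(existing_windows, new_window):
    # Session semantics: windows whose (gap-extended) ranges overlap collapse into one.
    candidates = sorted(existing_windows + [new_window])
    merged = []
    current = candidates[0]
    for start, end in candidates[1:]:
        if start <= current[1]:
            current = (current[0], max(current[1], end))
        else:
            merged.append(current)
            current = (start, end)
    merged.append(current)
    return merged


# Two existing sessions plus the window of a newly arrived element.
print(merge_sessions([(0, 10_000), (30_000, 40_000)], (8_000, 18_000)))
# [(0, 18000), (30000, 40000)] -> the first session absorbs the new window; as in
# _add_window above, the pre-existing state window keeps the merged accumulator.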
|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/window_trigger.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import abstractmethod, ABC
from typing import Generic
from pyflink.common.typeinfo import Types
from pyflink.datastream.state import ValueStateDescriptor
from pyflink.datastream.window import TimeWindow, CountWindow
from pyflink.fn_execution.table.window_context import TriggerContext, W
class Trigger(Generic[W], ABC):
"""
A Trigger determines when a pane of a window should be evaluated to emit the results for
that part of the window.
A pane is the bucket of elements that have the same key and same Window. An element
can be in multiple panes if it was assigned to multiple windows by the WindowAssigner.
These panes all have their own instance of the Trigger.
Triggers must not maintain state internally since they can be re-created or reused for
different keys. All necessary state should be persisted using the state abstraction available on
the TriggerContext.
"""
@abstractmethod
def open(self, ctx: TriggerContext):
"""
Initialization method for the trigger. Creates states in this method.
:param ctx: A context object that can be used to get states.
"""
pass
@abstractmethod
def on_element(self, element, timestamp, window: W) -> bool:
"""
Called for every element that gets added to a pane. The result of this will determine
whether the pane is evaluated to emit results.
:param element: The element that arrived.
:param timestamp: The timestamp of the element that arrived.
:param window: The window to which the element is being added.
:return: True for firing the window, False for no action
"""
pass
@abstractmethod
def on_processing_time(self, time: int, window: W) -> bool:
"""
Called when a processing-time timer that was set using the trigger context fires.
This method is not called in case the window does not contain any elements. Thus, if
you return PURGE from a trigger method and you expect to do cleanup in a future
invocation of a timer callback it might be wise to clean any state that you would clean in
the timer callback.
:param time: The timestamp at which the timer fired.
:param window: The window for which the timer fired.
:return: True for firing the window, False for no action
"""
pass
@abstractmethod
def on_event_time(self, time: int, window: W) -> bool:
"""
Called when a event-time timer that was set using the trigger context fires.
This method is not called in case the window does not contain any elements. Thus, if
you return PURGE from a trigger method and you expect to do cleanup in a future
invocation of a timer callback it might be wise to clean any state that you would clean in
the timer callback.
:param time: The timestamp at which the timer fired.
:param window: The window for which the timer fired.
:return: True for firing the window, False for no action
"""
pass
@abstractmethod
def on_merge(self, window: W, merge_context: TriggerContext):
"""
Called when several windows have been merged into one window by the WindowAssigner.
"""
pass
@abstractmethod
def clear(self, window: W) -> None:
"""
Clears any state that the trigger might still hold for the given window. This is called when
a window is purged. Timers set using TriggerContext.register_event_time_timer(int) and
TriggerContext.register_processing_time_timer(int) should be deleted here as well as
state acquired using TriggerContext.get_partitioned_state(StateDescriptor).
"""
pass
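# Illustrative subclass (not part of the module): a concrete trigger only has to answer
# "fire or not" for the element and timer callbacks and keep any bookkeeping it needs in
# partitioned state. This minimal variant fires on every incoming element.
class EveryElementTrigger(Trigger[TimeWindow]):

    def open(self, ctx: TriggerContext):
        self._ctx = ctx

    def on_element(self, element, timestamp, window) -> bool:
        return True  # fire for each element

    def on_processing_time(self, time: int, window) -> bool:
        return False

    def on_event_time(self, time: int, window) -> bool:
        return False

    def on_merge(self, window, merge_context: TriggerContext):
        pass  # no state to merge

    def clear(self, window) -> None:
        pass  # no timers or state to clean up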
class ProcessingTimeTrigger(Trigger[TimeWindow]):
"""
A Trigger that fires once the current system time passes the end of the window to which a
pane belongs.
"""
def __init__(self):
self._ctx = None # type: TriggerContext
def open(self, ctx: TriggerContext):
self._ctx = ctx
def on_element(self, element, timestamp, window: W) -> bool:
self._ctx.register_processing_time_timer(window.max_timestamp())
return False
def on_processing_time(self, time: int, window: W) -> bool:
return time == window.max_timestamp()
def on_event_time(self, time: int, window: W) -> bool:
return False
def on_merge(self, window: W, merge_context: TriggerContext):
self._ctx.register_processing_time_timer(window.max_timestamp())
def clear(self, window: W) -> None:
self._ctx.delete_processing_time_timer(window.max_timestamp())
class EventTimeTrigger(Trigger[TimeWindow]):
"""
A Trigger that fires once the watermark passes the end of the window to which a pane
belongs.
"""
def __init__(self):
self._ctx = None # type: TriggerContext
def open(self, ctx: TriggerContext):
self._ctx = ctx
def on_element(self, element, timestamp, window: W) -> bool:
if window.max_timestamp() <= self._ctx.get_current_watermark():
# if the watermark is already past the window fire immediately
return True
else:
self._ctx.register_event_time_timer(window.max_timestamp())
return False
def on_processing_time(self, time: int, window: W) -> bool:
return False
def on_event_time(self, time: int, window: W) -> bool:
return time == window.max_timestamp()
def on_merge(self, window: W, merge_context: TriggerContext):
self._ctx.register_event_time_timer(window.max_timestamp())
def clear(self, window: W) -> None:
self._ctx.delete_event_time_timer(window.max_timestamp())
class CountTrigger(Trigger[CountWindow]):
"""
A Trigger that fires once the count of elements in a pane reaches the given count.
"""
def __init__(self, count_elements: int):
self._count_elements = count_elements
self._count_state_desc = ValueStateDescriptor(
"trigger-count-%s" % count_elements, Types.LONG())
self._ctx = None # type: TriggerContext
def open(self, ctx: TriggerContext):
self._ctx = ctx
def on_element(self, element, timestamp, window: W) -> bool:
count_state = self._ctx.get_partitioned_state(self._count_state_desc)
count = count_state.value()
if count is None:
count = 0
count += 1
count_state.update(count)
if count >= self._count_elements:
count_state.clear()
return True
else:
return False
def on_processing_time(self, time: int, window: W) -> bool:
return False
def on_event_time(self, time: int, window: W) -> bool:
return False
def on_merge(self, window: W, merge_context: TriggerContext):
merge_context.merge_partitioned_state(self._count_state_desc)
def clear(self, window: W) -> None:
self._ctx.get_partitioned_state(self._count_state_desc).clear()
| 8,072 | 36.901408 | 100 |
py
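CountTrigger only touches its context through get_partitioned_state, so its firing pattern can be exercised with a duck-typed stand-in. A sketch assuming pyflink is installed and the internal module above is importable; the fake context and state classes are illustrative only:
from pyflink.fn_execution.table.window_trigger import CountTrigger


class _FakeValueState:
    # Minimal stand-in for the ValueState returned by a real TriggerContext.
    def __init__(self):
        self._v = None

    def value(self):
        return self._v

    def update(self, v):
        self._v = v

    def clear(self):
        self._v = None


class _FakeTriggerContext:
    # Implements only the method CountTrigger actually calls.
    def __init__(self):
        self._states = {}

    def get_partitioned_state(self, descriptor):
        return self._states.setdefault(descriptor.name, _FakeValueState())


trigger = CountTrigger(3)
trigger.open(_FakeTriggerContext())
print([trigger.on_element(i, 0, None) for i in range(7)])
# [False, False, True, False, False, True, False] -> fires on every third element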