repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
---|---|---|---|---|---|---|
flink
|
flink-master/flink-python/pyflink/datastream/formats/tests/test_json.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Types
from pyflink.datastream.formats.json import JsonRowSerializationSchema, JsonRowDeserializationSchema
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import PyFlinkTestCase
class JsonSerializationSchemaTests(PyFlinkTestCase):
def test_json_row_serialization_deserialization_schema(self):
jvm = get_gateway().jvm
jsons = ["{\"svt\":\"2020-02-24T12:58:09.209+0800\"}",
"{\"svt\":\"2020-02-24T12:58:09.209+0800\", "
"\"ops\":{\"id\":\"281708d0-4092-4c21-9233-931950b6eccf\"},\"ids\":[1, 2, 3]}",
"{\"svt\":\"2020-02-24T12:58:09.209+0800\"}"]
expected_jsons = ["{\"svt\":\"2020-02-24T12:58:09.209+0800\",\"ops\":null,\"ids\":null}",
"{\"svt\":\"2020-02-24T12:58:09.209+0800\","
"\"ops\":{\"id\":\"281708d0-4092-4c21-9233-931950b6eccf\"},"
"\"ids\":[1,2,3]}",
"{\"svt\":\"2020-02-24T12:58:09.209+0800\",\"ops\":null,\"ids\":null}"]
row_schema = Types.ROW_NAMED(["svt", "ops", "ids"],
[Types.STRING(),
Types.ROW_NAMED(['id'], [Types.STRING()]),
Types.PRIMITIVE_ARRAY(Types.INT())])
json_row_serialization_schema = JsonRowSerializationSchema.builder() \
.with_type_info(row_schema).build()
json_row_deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(row_schema).build()
json_row_serialization_schema._j_serialization_schema.open(
jvm.org.apache.flink.connector.testutils.formats.DummyInitializationContext())
json_row_deserialization_schema._j_deserialization_schema.open(
jvm.org.apache.flink.connector.testutils.formats.DummyInitializationContext())
for i in range(len(jsons)):
j_row = json_row_deserialization_schema._j_deserialization_schema\
.deserialize(bytes(jsons[i], encoding='utf-8'))
result = str(json_row_serialization_schema._j_serialization_schema.serialize(j_row),
encoding='utf-8')
self.assertEqual(expected_jsons[i], result)
| 3,258 | 55.189655 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/tests/test_parquet.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import calendar
import datetime
import glob
import os
import tempfile
import time
import unittest
from decimal import Decimal
from typing import List, Tuple
import pandas as pd
import pytz
from pyflink.common.time import Instant
from pyflink.common import Configuration, Row
from pyflink.common.typeinfo import RowTypeInfo, Types
from pyflink.common.watermark_strategy import WatermarkStrategy
from pyflink.datastream.connectors.file_system import FileSource, FileSink
from pyflink.datastream.formats.tests.test_avro import \
_create_basic_avro_schema_and_py_objects, _check_basic_avro_schema_results, \
_create_enum_avro_schema_and_py_objects, _check_enum_avro_schema_results, \
_create_union_avro_schema_and_py_objects, _check_union_avro_schema_results, \
_create_array_avro_schema_and_py_objects, _check_array_avro_schema_results, \
_create_map_avro_schema_and_py_objects, _check_map_avro_schema_results, \
_create_map_avro_schema_and_records, _create_array_avro_schema_and_records, \
_create_union_avro_schema_and_records, _create_enum_avro_schema_and_records, \
_create_basic_avro_schema_and_records, _import_avro_classes
from pyflink.datastream.formats.avro import GenericRecordAvroTypeInfo, AvroSchema
from pyflink.datastream.formats.parquet import AvroParquetReaders, ParquetColumnarRowInputFormat, \
AvroParquetWriters, ParquetBulkWriters
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.datastream.utils import create_hadoop_configuration
from pyflink.java_gateway import get_gateway
from pyflink.table.types import RowType, DataTypes, _to_java_data_type
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase, to_java_data_structure
@unittest.skipIf(os.environ.get('HADOOP_CLASSPATH') is None,
'Some Hadoop lib is needed for Parquet-Avro format tests')
class FileSourceAvroParquetReadersTests(PyFlinkStreamingTestCase):
def setUp(self):
super().setUp()
self.test_sink = DataStreamTestSinkFunction()
_import_avro_classes()
def test_parquet_avro_basic(self):
parquet_file_name = tempfile.mktemp(suffix='.parquet', dir=self.tempdir)
schema, records = _create_basic_avro_schema_and_records()
self._create_parquet_avro_file(parquet_file_name, schema, records)
self._build_parquet_avro_job(schema, parquet_file_name)
self.env.execute("test_parquet_avro_basic")
results = self.test_sink.get_results(True, False)
_check_basic_avro_schema_results(self, results)
def test_parquet_avro_enum(self):
parquet_file_name = tempfile.mktemp(suffix='.parquet', dir=self.tempdir)
schema, records = _create_enum_avro_schema_and_records()
self._create_parquet_avro_file(parquet_file_name, schema, records)
self._build_parquet_avro_job(schema, parquet_file_name)
self.env.execute("test_parquet_avro_enum")
results = self.test_sink.get_results(True, False)
_check_enum_avro_schema_results(self, results)
def test_parquet_avro_union(self):
parquet_file_name = tempfile.mktemp(suffix='.parquet', dir=self.tempdir)
schema, records = _create_union_avro_schema_and_records()
self._create_parquet_avro_file(parquet_file_name, schema, records)
self._build_parquet_avro_job(schema, parquet_file_name)
self.env.execute("test_parquet_avro_union")
results = self.test_sink.get_results(True, False)
_check_union_avro_schema_results(self, results)
def test_parquet_avro_array(self):
parquet_file_name = tempfile.mktemp(suffix='.parquet', dir=self.tempdir)
schema, records = _create_array_avro_schema_and_records()
self._create_parquet_avro_file(parquet_file_name, schema, records)
self._build_parquet_avro_job(schema, parquet_file_name)
self.env.execute("test_parquet_avro_array")
results = self.test_sink.get_results(True, False)
_check_array_avro_schema_results(self, results)
def test_parquet_avro_map(self):
parquet_file_name = tempfile.mktemp(suffix='.parquet', dir=self.tempdir)
schema, records = _create_map_avro_schema_and_records()
self._create_parquet_avro_file(parquet_file_name, schema, records)
self._build_parquet_avro_job(schema, parquet_file_name)
self.env.execute("test_parquet_avro_map")
results = self.test_sink.get_results(True, False)
_check_map_avro_schema_results(self, results)
def _build_parquet_avro_job(self, record_schema, *parquet_file_name):
ds = self.env.from_source(
FileSource.for_record_stream_format(
AvroParquetReaders.for_generic_record(record_schema),
*parquet_file_name
).build(),
WatermarkStrategy.for_monotonous_timestamps(),
"parquet-source"
)
ds.map(lambda e: e).add_sink(self.test_sink)
@staticmethod
def _create_parquet_avro_file(file_path: str, schema: AvroSchema, records: list):
jvm = get_gateway().jvm
j_path = jvm.org.apache.flink.core.fs.Path(file_path)
writer = jvm.org.apache.flink.formats.parquet.avro.AvroParquetWriters \
.forGenericRecord(schema._j_schema) \
.create(j_path.getFileSystem().create(
j_path,
jvm.org.apache.flink.core.fs.FileSystem.WriteMode.OVERWRITE
))
for record in records:
writer.addElement(record)
writer.flush()
writer.finish()
@unittest.skipIf(os.environ.get('HADOOP_CLASSPATH') is None,
'Some Hadoop lib is needed for Parquet-Avro format tests')
class FileSinkAvroParquetWritersTests(PyFlinkStreamingTestCase):
def setUp(self):
super().setUp()
# NOTE: parallelism == 1 is required to keep the order of results
self.env.set_parallelism(1)
self.parquet_dir_name = tempfile.mkdtemp(dir=self.tempdir)
self.test_sink = DataStreamTestSinkFunction()
def test_parquet_avro_basic_write(self):
schema, objects = _create_basic_avro_schema_and_py_objects()
self._build_avro_parquet_job(schema, objects)
self.env.execute('test_parquet_avro_basic_write')
results = self._read_parquet_avro_file(schema)
_check_basic_avro_schema_results(self, results)
def test_parquet_avro_enum_write(self):
schema, objects = _create_enum_avro_schema_and_py_objects()
self._build_avro_parquet_job(schema, objects)
self.env.execute('test_parquet_avro_enum_write')
results = self._read_parquet_avro_file(schema)
_check_enum_avro_schema_results(self, results)
def test_parquet_avro_union_write(self):
schema, objects = _create_union_avro_schema_and_py_objects()
self._build_avro_parquet_job(schema, objects)
self.env.execute('test_parquet_avro_union_write')
results = self._read_parquet_avro_file(schema)
_check_union_avro_schema_results(self, results)
def test_parquet_avro_array_write(self):
schema, objects = _create_array_avro_schema_and_py_objects()
self._build_avro_parquet_job(schema, objects)
self.env.execute('test_parquet_avro_array_write')
results = self._read_parquet_avro_file(schema)
_check_array_avro_schema_results(self, results)
def test_parquet_avro_map_write(self):
schema, objects = _create_map_avro_schema_and_py_objects()
self._build_avro_parquet_job(schema, objects)
self.env.execute('test_parquet_avro_map_write')
results = self._read_parquet_avro_file(schema)
_check_map_avro_schema_results(self, results)
def _build_avro_parquet_job(self, schema, objects):
ds = self.env.from_collection(objects)
avro_type_info = GenericRecordAvroTypeInfo(schema)
sink = FileSink.for_bulk_format(
self.parquet_dir_name, AvroParquetWriters.for_generic_record(schema)
).build()
ds.map(lambda e: e, output_type=avro_type_info).sink_to(sink)
def _read_parquet_avro_file(self, schema) -> List[dict]:
parquet_files = [f for f in glob.glob(self.parquet_dir_name, recursive=True)]
FileSourceAvroParquetReadersTests._build_parquet_avro_job(self, schema, *parquet_files)
self.env.execute()
return self.test_sink.get_results(True, False)
@unittest.skipIf(os.environ.get('HADOOP_CLASSPATH') is None,
'Some Hadoop lib is needed for Parquet Columnar format tests')
class FileSourceParquetColumnarRowInputFormatTests(PyFlinkStreamingTestCase):
def setUp(self):
super().setUp()
self.test_sink = DataStreamTestSinkFunction()
self.parquet_file_name = tempfile.mktemp(suffix='.parquet', dir=self.tempdir)
def test_parquet_columnar_basic_read(self):
os.environ['TZ'] = 'Asia/Shanghai'
time.tzset()
row_type, _, data = _create_parquet_basic_row_and_data()
_write_row_data_to_parquet_file(self.parquet_file_name, row_type, data)
self._build_parquet_columnar_job(row_type)
self.env.execute('test_parquet_columnar_basic_read')
results = self.test_sink.get_results(True, False)
_check_parquet_basic_results(self, results)
def _build_parquet_columnar_job(self, row_type: RowType):
source = FileSource.for_bulk_file_format(
ParquetColumnarRowInputFormat(row_type, Configuration(), 10, True, False),
self.parquet_file_name
).build()
ds = self.env.from_source(source, WatermarkStrategy.no_watermarks(), 'parquet-source')
ds.map(lambda e: e).add_sink(self.test_sink)
@unittest.skipIf(os.environ.get('HADOOP_CLASSPATH') is None,
'Some Hadoop lib is needed for Parquet RowData format tests')
class FileSinkParquetBulkWriterTests(PyFlinkStreamingTestCase):
def setUp(self):
super().setUp()
# NOTE: parallelism == 1 is required to keep the order of results
self.env.set_parallelism(1)
self.parquet_dir_name = tempfile.mkdtemp(dir=self.tempdir)
def test_parquet_row_data_basic_write(self):
os.environ['TZ'] = 'Asia/Shanghai'
time.tzset()
row_type, row_type_info, data = _create_parquet_basic_row_and_data()
self._build_parquet_job(row_type, row_type_info, data)
self.env.execute('test_parquet_row_data_basic_write')
results = self._read_parquet_file()
_check_parquet_basic_results(self, results)
def test_parquet_row_data_array_write(self):
row_type, row_type_info, data = _create_parquet_array_row_and_data()
self._build_parquet_job(row_type, row_type_info, data)
self.env.execute('test_parquet_row_data_array_write')
results = self._read_parquet_file()
_check_parquet_array_results(self, results)
@unittest.skip('ParquetSchemaConverter in flink-parquet annotate map keys as optional, but '
'Arrow restricts them to be required')
def test_parquet_row_data_map_write(self):
row_type, row_type_info, data = _create_parquet_map_row_and_data()
self._build_parquet_job(row_type, row_type_info, data)
self.env.execute('test_parquet_row_data_map_write')
results = self._read_parquet_file()
_check_parquet_map_results(self, results)
def _build_parquet_job(self, row_type: RowType, row_type_info: RowTypeInfo, data: List[Row]):
sink = FileSink.for_bulk_format(
self.parquet_dir_name, ParquetBulkWriters.for_row_type(row_type, utc_timestamp=True)
).build()
ds = self.env.from_collection(data, type_info=row_type_info)
ds.sink_to(sink)
def _read_parquet_file(self):
records = []
for file in glob.glob(os.path.join(os.path.join(self.parquet_dir_name, '**/*'))):
df = pd.read_parquet(file)
for i in range(df.shape[0]):
records.append(df.loc[i])
return records
def _write_row_data_to_parquet_file(path: str, row_type: RowType, rows: List[Row]):
jvm = get_gateway().jvm
flink = jvm.org.apache.flink
j_output_stream = flink.core.fs.local.LocalDataOutputStream(jvm.java.io.File(path))
j_bulk_writer = flink.formats.parquet.row.ParquetRowDataBuilder.createWriterFactory(
_to_java_data_type(row_type).getLogicalType(),
create_hadoop_configuration(Configuration()),
True,
).create(j_output_stream)
row_row_converter = flink.table.data.conversion.RowRowConverter.create(
_to_java_data_type(row_type)
)
row_row_converter.open(row_row_converter.getClass().getClassLoader())
for row in rows:
j_bulk_writer.addElement(row_row_converter.toInternal(to_java_data_structure(row)))
j_bulk_writer.finish()
def _create_parquet_basic_row_and_data() -> Tuple[RowType, RowTypeInfo, List[Row]]:
row_type = DataTypes.ROW([
DataTypes.FIELD('char', DataTypes.CHAR(10)),
DataTypes.FIELD('varchar', DataTypes.VARCHAR(10)),
DataTypes.FIELD('binary', DataTypes.BINARY(10)),
DataTypes.FIELD('varbinary', DataTypes.VARBINARY(10)),
DataTypes.FIELD('boolean', DataTypes.BOOLEAN()),
DataTypes.FIELD('decimal', DataTypes.DECIMAL(2, 0)),
DataTypes.FIELD('int', DataTypes.INT()),
DataTypes.FIELD('bigint', DataTypes.BIGINT()),
DataTypes.FIELD('double', DataTypes.DOUBLE()),
DataTypes.FIELD('date', DataTypes.DATE().bridged_to('java.sql.Date')),
DataTypes.FIELD('time', DataTypes.TIME().bridged_to('java.sql.Time')),
DataTypes.FIELD('timestamp', DataTypes.TIMESTAMP(3).bridged_to('java.sql.Timestamp')),
DataTypes.FIELD('timestamp_ltz', DataTypes.TIMESTAMP_LTZ(3)),
])
row_type_info = Types.ROW_NAMED(
['char', 'varchar', 'binary', 'varbinary', 'boolean', 'decimal', 'int', 'bigint', 'double',
'date', 'time', 'timestamp', 'timestamp_ltz'],
[Types.STRING(), Types.STRING(), Types.PRIMITIVE_ARRAY(Types.BYTE()),
Types.PRIMITIVE_ARRAY(Types.BYTE()), Types.BOOLEAN(), Types.BIG_DEC(), Types.INT(),
Types.LONG(), Types.DOUBLE(), Types.SQL_DATE(), Types.SQL_TIME(), Types.SQL_TIMESTAMP(),
Types.INSTANT()]
)
datetime_ltz = datetime.datetime(1970, 2, 3, 4, 5, 6, 700000, tzinfo=pytz.timezone('UTC'))
timestamp_ltz = Instant.of_epoch_milli(
(
calendar.timegm(datetime_ltz.utctimetuple()) +
calendar.timegm(time.localtime(0))
) * 1000 + datetime_ltz.microsecond // 1000
)
data = [Row(
char='char',
varchar='varchar',
binary=b'binary',
varbinary=b'varbinary',
boolean=True,
decimal=Decimal(1.5),
int=2147483647,
bigint=-9223372036854775808,
double=2e-308,
date=datetime.date(1970, 1, 1),
time=datetime.time(1, 1, 1),
timestamp=datetime.datetime(1970, 1, 2, 3, 4, 5, 600000),
timestamp_ltz=timestamp_ltz
)]
return row_type, row_type_info, data
def _check_parquet_basic_results(test, results):
row = results[0]
test.assertEqual(row['char'], 'char')
test.assertEqual(row['varchar'], 'varchar')
test.assertEqual(row['binary'], b'binary')
test.assertEqual(row['varbinary'], b'varbinary')
test.assertEqual(row['boolean'], True)
test.assertAlmostEqual(row['decimal'], 2)
test.assertEqual(row['int'], 2147483647)
test.assertEqual(row['bigint'], -9223372036854775808)
test.assertAlmostEqual(row['double'], 2e-308, delta=1e-311)
test.assertEqual(row['date'], datetime.date(1970, 1, 1))
test.assertEqual(row['time'], datetime.time(1, 1, 1))
ts = row['timestamp']
if isinstance(ts, pd.Timestamp):
ts = ts.to_pydatetime()
test.assertEqual(ts, datetime.datetime(1970, 1, 2, 3, 4, 5, 600000))
ts_ltz = row['timestamp_ltz']
if isinstance(ts_ltz, pd.Timestamp):
ts_ltz = pytz.timezone('Asia/Shanghai').localize(ts_ltz.to_pydatetime())
test.assertEqual(
ts_ltz,
pytz.timezone('Asia/Shanghai').localize(datetime.datetime(1970, 2, 3, 12, 5, 6, 700000))
)
def _create_parquet_array_row_and_data() -> Tuple[RowType, RowTypeInfo, List[Row]]:
row_type = DataTypes.ROW([
DataTypes.FIELD(
'string_array',
DataTypes.ARRAY(DataTypes.STRING()).bridged_to('java.util.ArrayList')
),
DataTypes.FIELD(
'int_array',
DataTypes.ARRAY(DataTypes.INT()).bridged_to('java.util.ArrayList')
),
])
row_type_info = Types.ROW_NAMED([
'string_array',
'int_array',
], [
Types.LIST(Types.STRING()),
Types.LIST(Types.INT()),
])
data = [Row(
string_array=['a', 'b', 'c'],
int_array=[1, 2, 3],
)]
return row_type, row_type_info, data
def _check_parquet_array_results(test, results):
row = results[0]
test.assertEqual(row['string_array'][0], 'a')
test.assertEqual(row['string_array'][1], 'b')
test.assertEqual(row['string_array'][2], 'c')
test.assertEqual(row['int_array'][0], 1)
test.assertEqual(row['int_array'][1], 2)
test.assertEqual(row['int_array'][2], 3)
def _create_parquet_map_row_and_data() -> Tuple[RowType, RowTypeInfo, List[Row]]:
row_type = DataTypes.ROW([
DataTypes.FIELD('map', DataTypes.MAP(DataTypes.INT(), DataTypes.STRING())),
])
row_type_info = Types.ROW_NAMED(['map'], [Types.MAP(Types.INT(), Types.STRING())])
data = [Row(
map={0: 'a', 1: 'b', 2: 'c'}
)]
return row_type, row_type_info, data
def _check_parquet_map_results(test, results):
m = {k: v for k, v in results[0]['map']}
test.assertEqual(m[0], 'a')
test.assertEqual(m[1], 'b')
test.assertEqual(m[2], 'c')
| 18,810 | 43.68171 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/tests/test_orc.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import glob
import os
import tempfile
import unittest
from datetime import date, datetime
from decimal import Decimal
from typing import List, Tuple
import pandas as pd
from pyflink.common import Row
from pyflink.common.typeinfo import RowTypeInfo, Types
from pyflink.datastream import DataStream
from pyflink.datastream.connectors.file_system import FileSink
from pyflink.datastream.formats.orc import OrcBulkWriters
from pyflink.datastream.formats.tests.test_parquet import _create_parquet_array_row_and_data, \
_check_parquet_array_results, _create_parquet_map_row_and_data, _check_parquet_map_results
from pyflink.java_gateway import get_gateway
from pyflink.table.types import RowType, DataTypes
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase, to_java_data_structure
@unittest.skipIf(os.environ.get('HADOOP_CLASSPATH') is None,
'Some Hadoop lib is needed for Orc format tests')
class FileSinkOrcBulkWriterTests(PyFlinkStreamingTestCase):
def setUp(self):
super().setUp()
self.env.set_parallelism(1)
self.orc_dir_name = tempfile.mkdtemp(dir=self.tempdir)
def test_orc_basic_write(self):
row_type, row_type_info, data = _create_orc_basic_row_and_data()
self._build_orc_job(row_type, row_type_info, data)
self.env.execute('test_orc_basic_write')
results = self._read_orc_file()
_check_orc_basic_results(self, results)
def test_orc_array_write(self):
row_type, row_type_info, data = _create_parquet_array_row_and_data()
self._build_orc_job(row_type, row_type_info, data)
self.env.execute()
results = self._read_orc_file()
_check_parquet_array_results(self, results)
def test_orc_map_write(self):
row_type, row_type_info, data = _create_parquet_map_row_and_data()
self._build_orc_job(row_type, row_type_info, data)
self.env.execute()
results = self._read_orc_file()
_check_parquet_map_results(self, results)
def _build_orc_job(self, row_type: RowType, row_type_info: RowTypeInfo, data: List[Row]):
jvm = get_gateway().jvm
sink = FileSink.for_bulk_format(
self.orc_dir_name, OrcBulkWriters.for_row_type(row_type)
).build()
j_list = jvm.java.util.ArrayList()
for d in data:
j_list.add(to_java_data_structure(d))
ds = DataStream(self.env._j_stream_execution_environment.fromCollection(
j_list,
row_type_info.get_java_type_info()
))
ds.sink_to(sink)
def _read_orc_file(self):
records = []
for file in glob.glob(os.path.join(os.path.join(self.orc_dir_name, '**/*'))):
df = pd.read_orc(file)
for i in range(df.shape[0]):
records.append(df.loc[i])
return records
def _create_orc_basic_row_and_data() -> Tuple[RowType, RowTypeInfo, List[Row]]:
row_type = DataTypes.ROW([
DataTypes.FIELD('char', DataTypes.CHAR(10)),
DataTypes.FIELD('varchar', DataTypes.VARCHAR(10)),
DataTypes.FIELD('bytes', DataTypes.BYTES()),
DataTypes.FIELD('boolean', DataTypes.BOOLEAN()),
DataTypes.FIELD('decimal', DataTypes.DECIMAL(2, 0)),
DataTypes.FIELD('int', DataTypes.INT()),
DataTypes.FIELD('bigint', DataTypes.BIGINT()),
DataTypes.FIELD('double', DataTypes.DOUBLE()),
DataTypes.FIELD('date', DataTypes.DATE().bridged_to('java.sql.Date')),
DataTypes.FIELD('timestamp', DataTypes.TIMESTAMP(3).bridged_to('java.sql.Timestamp')),
])
row_type_info = Types.ROW_NAMED(
['char', 'varchar', 'bytes', 'boolean', 'decimal', 'int', 'bigint', 'double',
'date', 'timestamp'],
[Types.STRING(), Types.STRING(), Types.PRIMITIVE_ARRAY(Types.BYTE()), Types.BOOLEAN(),
Types.BIG_DEC(), Types.INT(), Types.LONG(), Types.DOUBLE(), Types.SQL_DATE(),
Types.SQL_TIMESTAMP()]
)
data = [Row(
char='char',
varchar='varchar',
bytes=b'varbinary',
boolean=True,
decimal=Decimal(1.5),
int=2147483647,
bigint=-9223372036854775808,
double=2e-308,
date=date(1970, 1, 1),
timestamp=datetime(1970, 1, 2, 3, 4, 5, 600000),
)]
return row_type, row_type_info, data
def _check_orc_basic_results(test, results):
row = results[0]
test.assertEqual(row['char'], b'char ')
test.assertEqual(row['varchar'], 'varchar')
test.assertEqual(row['bytes'], b'varbinary')
test.assertEqual(row['boolean'], True)
test.assertAlmostEqual(row['decimal'], 2)
test.assertEqual(row['int'], 2147483647)
test.assertEqual(row['bigint'], -9223372036854775808)
test.assertAlmostEqual(row['double'], 2e-308, delta=1e-311)
test.assertEqual(row['date'], date(1970, 1, 1))
test.assertEqual(
row['timestamp'].to_pydatetime(),
datetime(1970, 1, 2, 3, 4, 5, 600000),
)
| 5,925 | 40.440559 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/tests/test_avro.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import glob
import os
import tempfile
from typing import Tuple, List
from avro.datafile import DataFileReader
from avro.io import DatumReader
from py4j.java_gateway import JavaObject, java_import
from pyflink.datastream import MapFunction
from pyflink.datastream.connectors.file_system import FileSink
from pyflink.datastream.formats.avro import AvroSchema, GenericRecordAvroTypeInfo, \
AvroBulkWriters, AvroInputFormat
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import PyFlinkUTTestCase
class FileSourceAvroInputFormatTests(PyFlinkUTTestCase):
def setUp(self):
super().setUp()
self.test_sink = DataStreamTestSinkFunction()
self.avro_file_name = tempfile.mktemp(suffix='.avro', dir=self.tempdir)
_import_avro_classes()
def test_avro_basic_read(self):
schema, records = _create_basic_avro_schema_and_records()
self._create_avro_file(schema, records)
self._build_avro_job(schema)
self.env.execute('test_avro_basic_read')
results = self.test_sink.get_results(True, False)
_check_basic_avro_schema_results(self, results)
def test_avro_enum_read(self):
schema, records = _create_enum_avro_schema_and_records()
self._create_avro_file(schema, records)
self._build_avro_job(schema)
self.env.execute('test_avro_enum_read')
results = self.test_sink.get_results(True, False)
_check_enum_avro_schema_results(self, results)
def test_avro_union_read(self):
schema, records = _create_union_avro_schema_and_records()
self._create_avro_file(schema, records)
self._build_avro_job(schema)
self.env.execute('test_avro_union_read')
results = self.test_sink.get_results(True, False)
_check_union_avro_schema_results(self, results)
def test_avro_array_read(self):
schema, records = _create_array_avro_schema_and_records()
self._create_avro_file(schema, records)
self._build_avro_job(schema)
self.env.execute('test_avro_array_read')
results = self.test_sink.get_results(True, False)
_check_array_avro_schema_results(self, results)
def test_avro_map_read(self):
schema, records = _create_map_avro_schema_and_records()
self._create_avro_file(schema, records)
self._build_avro_job(schema)
self.env.execute('test_avro_map_read')
results = self.test_sink.get_results(True, False)
_check_map_avro_schema_results(self, results)
def _build_avro_job(self, record_schema):
ds = self.env.create_input(AvroInputFormat(self.avro_file_name, record_schema))
ds.map(PassThroughMapFunction()).add_sink(self.test_sink)
def _create_avro_file(self, schema: AvroSchema, records: list):
jvm = get_gateway().jvm
j_file = jvm.java.io.File(self.avro_file_name)
j_datum_writer = jvm.org.apache.flink.avro.shaded.org.apache.avro.generic \
.GenericDatumWriter()
j_file_writer = jvm.org.apache.flink.avro.shaded.org.apache.avro.file \
.DataFileWriter(j_datum_writer)
j_file_writer.create(schema._j_schema, j_file)
for r in records:
j_file_writer.append(r)
j_file_writer.close()
class FileSinkAvroWritersTests(PyFlinkUTTestCase):
def setUp(self) -> None:
super().setUp()
# NOTE: parallelism == 1 is required to keep the order of results
self.env.set_parallelism(1)
self.avro_dir_name = tempfile.mkdtemp(dir=self.tempdir)
def test_avro_basic_write(self):
schema, objects = _create_basic_avro_schema_and_py_objects()
self._build_avro_job(schema, objects)
self.env.execute('test_avro_basic_write')
results = self._read_avro_file()
_check_basic_avro_schema_results(self, results)
def test_avro_enum_write(self):
schema, objects = _create_enum_avro_schema_and_py_objects()
self._build_avro_job(schema, objects)
self.env.execute('test_avro_enum_write')
results = self._read_avro_file()
_check_enum_avro_schema_results(self, results)
def test_avro_union_write(self):
schema, objects = _create_union_avro_schema_and_py_objects()
self._build_avro_job(schema, objects)
self.env.execute('test_avro_union_write')
results = self._read_avro_file()
_check_union_avro_schema_results(self, results)
def test_avro_array_write(self):
schema, objects = _create_array_avro_schema_and_py_objects()
self._build_avro_job(schema, objects)
self.env.execute('test_avro_array_write')
results = self._read_avro_file()
_check_array_avro_schema_results(self, results)
def test_avro_map_write(self):
schema, objects = _create_map_avro_schema_and_py_objects()
self._build_avro_job(schema, objects)
self.env.execute('test_avro_map_write')
results = self._read_avro_file()
_check_map_avro_schema_results(self, results)
def _build_avro_job(self, schema, objects):
ds = self.env.from_collection(objects)
sink = FileSink.for_bulk_format(
self.avro_dir_name, AvroBulkWriters.for_generic_record(schema)
).build()
ds.map(lambda e: e, output_type=GenericRecordAvroTypeInfo(schema)).sink_to(sink)
def _read_avro_file(self) -> List[dict]:
records = []
for file in glob.glob(os.path.join(os.path.join(self.avro_dir_name, '**/*'))):
for record in DataFileReader(open(file, 'rb'), DatumReader()):
records.append(record)
return records
class PassThroughMapFunction(MapFunction):
def map(self, value):
return value
def _import_avro_classes():
jvm = get_gateway().jvm
classes = ['org.apache.avro.generic.GenericData']
prefix = 'org.apache.flink.avro.shaded.'
for cls in classes:
java_import(jvm, prefix + cls)
BASIC_SCHEMA = """
{
"type": "record",
"name": "test",
"fields": [
{ "name": "null", "type": "null" },
{ "name": "boolean", "type": "boolean" },
{ "name": "int", "type": "int" },
{ "name": "long", "type": "long" },
{ "name": "float", "type": "float" },
{ "name": "double", "type": "double" },
{ "name": "string", "type": "string" }
]
}
"""
def _create_basic_avro_schema_and_records() -> Tuple[AvroSchema, List[JavaObject]]:
schema = AvroSchema.parse_string(BASIC_SCHEMA)
records = [_create_basic_avro_record(schema, True, 0, 1, 2, 3, 's1'),
_create_basic_avro_record(schema, False, 4, 5, 6, 7, 's2')]
return schema, records
def _create_basic_avro_schema_and_py_objects() -> Tuple[AvroSchema, List[dict]]:
schema = AvroSchema.parse_string(BASIC_SCHEMA)
objects = [
{'null': None, 'boolean': True, 'int': 0, 'long': 1,
'float': 2., 'double': 3., 'string': 's1'},
{'null': None, 'boolean': False, 'int': 4, 'long': 5,
'float': 6., 'double': 7., 'string': 's2'},
]
return schema, objects
def _check_basic_avro_schema_results(test, results):
result1 = results[0]
result2 = results[1]
test.assertEqual(result1['null'], None)
test.assertEqual(result1['boolean'], True)
test.assertEqual(result1['int'], 0)
test.assertEqual(result1['long'], 1)
test.assertAlmostEqual(result1['float'], 2, delta=1e-3)
test.assertAlmostEqual(result1['double'], 3, delta=1e-3)
test.assertEqual(result1['string'], 's1')
test.assertEqual(result2['null'], None)
test.assertEqual(result2['boolean'], False)
test.assertEqual(result2['int'], 4)
test.assertEqual(result2['long'], 5)
test.assertAlmostEqual(result2['float'], 6, delta=1e-3)
test.assertAlmostEqual(result2['double'], 7, delta=1e-3)
test.assertEqual(result2['string'], 's2')
ENUM_SCHEMA = """
{
"type": "record",
"name": "test",
"fields": [
{
"name": "suit",
"type": {
"type": "enum",
"name": "Suit",
"symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
}
}
]
}
"""
def _create_enum_avro_schema_and_records() -> Tuple[AvroSchema, List[JavaObject]]:
schema = AvroSchema.parse_string(ENUM_SCHEMA)
records = [_create_enum_avro_record(schema, 'SPADES'),
_create_enum_avro_record(schema, 'DIAMONDS')]
return schema, records
def _create_enum_avro_schema_and_py_objects() -> Tuple[AvroSchema, List[dict]]:
schema = AvroSchema.parse_string(ENUM_SCHEMA)
records = [
{'suit': 'SPADES'},
{'suit': 'DIAMONDS'},
]
return schema, records
def _check_enum_avro_schema_results(test, results):
test.assertEqual(results[0]['suit'], 'SPADES')
test.assertEqual(results[1]['suit'], 'DIAMONDS')
UNION_SCHEMA = """
{
"type": "record",
"name": "test",
"fields": [
{
"name": "union",
"type": [ "int", "double", "null" ]
}
]
}
"""
def _create_union_avro_schema_and_records() -> Tuple[AvroSchema, List[JavaObject]]:
schema = AvroSchema.parse_string(UNION_SCHEMA)
records = [_create_union_avro_record(schema, 1),
_create_union_avro_record(schema, 2.),
_create_union_avro_record(schema, None)]
return schema, records
def _create_union_avro_schema_and_py_objects() -> Tuple[AvroSchema, List[dict]]:
schema = AvroSchema.parse_string(UNION_SCHEMA)
records = [
{'union': 1},
{'union': 2.},
{'union': None},
]
return schema, records
def _check_union_avro_schema_results(test, results):
test.assertEqual(results[0]['union'], 1)
test.assertAlmostEqual(results[1]['union'], 2.0, delta=1e-3)
test.assertEqual(results[2]['union'], None)
# It seems there's bug when array item record contains only one field, which throws
# java.lang.ClassCastException: required ... is not a group when reading
ARRAY_SCHEMA = """
{
"type": "record",
"name": "test",
"fields": [
{
"name": "array",
"type": {
"type": "array",
"items": {
"type": "record",
"name": "item",
"fields": [
{ "name": "int", "type": "int" },
{ "name": "double", "type": "double" }
]
}
}
}
]
}
"""
def _create_array_avro_schema_and_records() -> Tuple[AvroSchema, List[JavaObject]]:
schema = AvroSchema.parse_string(ARRAY_SCHEMA)
records = [_create_array_avro_record(schema, [(1, 2.), (3, 4.)]),
_create_array_avro_record(schema, [(5, 6.), (7, 8.)])]
return schema, records
def _create_array_avro_schema_and_py_objects() -> Tuple[AvroSchema, List[dict]]:
schema = AvroSchema.parse_string(ARRAY_SCHEMA)
records = [
{'array': [{'int': 1, 'double': 2.}, {'int': 3, 'double': 4.}]},
{'array': [{'int': 5, 'double': 6.}, {'int': 7, 'double': 8.}]},
]
return schema, records
def _check_array_avro_schema_results(test, results):
result1 = results[0]
result2 = results[1]
test.assertEqual(result1['array'][0]['int'], 1)
test.assertAlmostEqual(result1['array'][0]['double'], 2., delta=1e-3)
test.assertEqual(result1['array'][1]['int'], 3)
test.assertAlmostEqual(result1['array'][1]['double'], 4., delta=1e-3)
test.assertEqual(result2['array'][0]['int'], 5)
test.assertAlmostEqual(result2['array'][0]['double'], 6., delta=1e-3)
test.assertEqual(result2['array'][1]['int'], 7)
test.assertAlmostEqual(result2['array'][1]['double'], 8., delta=1e-3)
MAP_SCHEMA = """
{
"type": "record",
"name": "test",
"fields": [
{
"name": "map",
"type": {
"type": "map",
"values": "long"
}
}
]
}
"""
def _create_map_avro_schema_and_records() -> Tuple[AvroSchema, List[JavaObject]]:
schema = AvroSchema.parse_string(MAP_SCHEMA)
records = [_create_map_avro_record(schema, {'a': 1, 'b': 2}),
_create_map_avro_record(schema, {'c': 3, 'd': 4})]
return schema, records
def _create_map_avro_schema_and_py_objects() -> Tuple[AvroSchema, List[dict]]:
schema = AvroSchema.parse_string(MAP_SCHEMA)
records = [
{'map': {'a': 1, 'b': 2}},
{'map': {'c': 3, 'd': 4}},
]
return schema, records
def _check_map_avro_schema_results(test, results):
result1 = results[0]
result2 = results[1]
test.assertEqual(result1['map']['a'], 1)
test.assertEqual(result1['map']['b'], 2)
test.assertEqual(result2['map']['c'], 3)
test.assertEqual(result2['map']['d'], 4)
def _create_basic_avro_record(schema: AvroSchema, boolean_value, int_value, long_value,
float_value, double_value, string_value):
jvm = get_gateway().jvm
j_record = jvm.GenericData.Record(schema._j_schema)
j_record.put('boolean', boolean_value)
j_record.put('int', int_value)
j_record.put('long', long_value)
j_record.put('float', float_value)
j_record.put('double', double_value)
j_record.put('string', string_value)
return j_record
def _create_enum_avro_record(schema: AvroSchema, enum_value):
jvm = get_gateway().jvm
j_record = jvm.GenericData.Record(schema._j_schema)
j_enum = jvm.GenericData.EnumSymbol(schema._j_schema.getField('suit').schema(), enum_value)
j_record.put('suit', j_enum)
return j_record
def _create_union_avro_record(schema, union_value):
jvm = get_gateway().jvm
j_record = jvm.GenericData.Record(schema._j_schema)
j_record.put('union', union_value)
return j_record
def _create_array_avro_record(schema, item_values: list):
jvm = get_gateway().jvm
j_record = jvm.GenericData.Record(schema._j_schema)
item_schema = AvroSchema(schema._j_schema.getField('array').schema().getElementType())
j_array = jvm.java.util.ArrayList()
for idx, item_value in enumerate(item_values):
j_item = jvm.GenericData.Record(item_schema._j_schema)
j_item.put('int', item_value[0])
j_item.put('double', item_value[1])
j_array.add(j_item)
j_record.put('array', j_array)
return j_record
def _create_map_avro_record(schema, map: dict):
jvm = get_gateway().jvm
j_record = jvm.GenericData.Record(schema._j_schema)
j_map = jvm.java.util.HashMap()
for k, v in map.items():
j_map.put(k, v)
j_record.put('map', j_map)
return j_record
| 15,704 | 33.822616 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/tests/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_stream_execution_environment_completeness.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.testing.test_case_utils import PythonAPICompletenessTestCase, PyFlinkTestCase
class StreamExecutionEnvironmentCompletenessTests(PythonAPICompletenessTestCase,
PyFlinkTestCase):
@classmethod
def python_class(cls):
return StreamExecutionEnvironment
@classmethod
def java_class(cls):
return "org.apache.flink.streaming.api.environment.StreamExecutionEnvironment"
@classmethod
def excluded_methods(cls):
# Exclude these methods for the time being, because current
# StreamExecutionEnvironment do not apply to the
# DataStream API, but to the Table API configuration.
# Currently only the methods for configuration is added.
# 'isForceCheckpointing', 'getNumberOfExecutionRetries', 'setNumberOfExecutionRetries'
# is deprecated, exclude them.
return {'getLastJobExecutionResult', 'getId', 'getIdString',
'createCollectionsEnvironment', 'createLocalEnvironment',
'createRemoteEnvironment', 'addOperator', 'fromElements',
'resetContextEnvironment', 'getCachedFiles', 'generateSequence',
'getNumberOfExecutionRetries', 'getStreamGraph', 'fromParallelCollection',
'readFileStream', 'isForceCheckpointing', 'readFile', 'clean',
'createInput', 'createLocalEnvironmentWithWebUI', 'fromCollection',
'socketTextStream', 'initializeContextEnvironment', 'readTextFile',
'setNumberOfExecutionRetries', 'executeAsync', 'registerJobListener',
'clearJobListeners', 'getJobListeners', 'fromSequence', 'getConfiguration',
'generateStreamGraph', 'getTransformations', 'areExplicitEnvironmentsAllowed',
'registerCollectIterator', 'listCompletedClusterDatasets',
'invalidateClusterDataset', 'registerCacheTransformation', 'close'}
if __name__ == '__main__':
import unittest
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| 3,242 | 48.136364 | 94 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_check_point_config.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Duration
from pyflink.datastream import (CheckpointConfig, CheckpointingMode, ExternalizedCheckpointCleanup,
StreamExecutionEnvironment)
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import PyFlinkTestCase
class CheckpointConfigTests(PyFlinkTestCase):
def setUp(self):
self.env = StreamExecutionEnvironment\
.get_execution_environment()
self.checkpoint_config = self.env.get_checkpoint_config()
def test_constant(self):
gateway = get_gateway()
JCheckpointConfig = gateway.jvm.org.apache.flink.streaming.api.environment.CheckpointConfig
self.assertEqual(CheckpointConfig.DEFAULT_MAX_CONCURRENT_CHECKPOINTS,
JCheckpointConfig.DEFAULT_MAX_CONCURRENT_CHECKPOINTS)
self.assertEqual(CheckpointConfig.DEFAULT_MIN_PAUSE_BETWEEN_CHECKPOINTS,
JCheckpointConfig.DEFAULT_MIN_PAUSE_BETWEEN_CHECKPOINTS)
self.assertEqual(CheckpointConfig.DEFAULT_TIMEOUT, JCheckpointConfig.DEFAULT_TIMEOUT)
self.assertEqual(CheckpointConfig.DEFAULT_MODE,
CheckpointingMode._from_j_checkpointing_mode(
JCheckpointConfig.DEFAULT_MODE))
def test_is_checkpointing_enabled(self):
self.assertFalse(self.checkpoint_config.is_checkpointing_enabled())
self.env.enable_checkpointing(1000)
self.assertTrue(self.checkpoint_config.is_checkpointing_enabled())
def test_get_set_checkpointing_mode(self):
self.assertEqual(self.checkpoint_config.get_checkpointing_mode(),
CheckpointingMode.EXACTLY_ONCE)
self.checkpoint_config.set_checkpointing_mode(CheckpointingMode.AT_LEAST_ONCE)
self.assertEqual(self.checkpoint_config.get_checkpointing_mode(),
CheckpointingMode.AT_LEAST_ONCE)
self.checkpoint_config.set_checkpointing_mode(CheckpointingMode.EXACTLY_ONCE)
self.assertEqual(self.checkpoint_config.get_checkpointing_mode(),
CheckpointingMode.EXACTLY_ONCE)
def test_get_set_checkpoint_interval(self):
self.assertEqual(self.checkpoint_config.get_checkpoint_interval(), -1)
self.checkpoint_config.set_checkpoint_interval(1000)
self.assertEqual(self.checkpoint_config.get_checkpoint_interval(), 1000)
def test_get_set_checkpoint_timeout(self):
self.assertEqual(self.checkpoint_config.get_checkpoint_timeout(), 600000)
self.checkpoint_config.set_checkpoint_timeout(300000)
self.assertEqual(self.checkpoint_config.get_checkpoint_timeout(), 300000)
def test_get_set_min_pause_between_checkpoints(self):
self.assertEqual(self.checkpoint_config.get_min_pause_between_checkpoints(), 0)
self.checkpoint_config.set_min_pause_between_checkpoints(100000)
self.assertEqual(self.checkpoint_config.get_min_pause_between_checkpoints(), 100000)
def test_get_set_max_concurrent_checkpoints(self):
self.assertEqual(self.checkpoint_config.get_max_concurrent_checkpoints(), 1)
self.checkpoint_config.set_max_concurrent_checkpoints(2)
self.assertEqual(self.checkpoint_config.get_max_concurrent_checkpoints(), 2)
def test_get_set_fail_on_checkpointing_errors(self):
self.assertTrue(self.checkpoint_config.is_fail_on_checkpointing_errors())
self.checkpoint_config.set_fail_on_checkpointing_errors(False)
self.assertFalse(self.checkpoint_config.is_fail_on_checkpointing_errors())
def test_get_set_tolerable_checkpoint_failure_number(self):
self.assertEqual(self.checkpoint_config.get_tolerable_checkpoint_failure_number(), 0)
self.checkpoint_config.set_tolerable_checkpoint_failure_number(2)
self.assertEqual(self.checkpoint_config.get_tolerable_checkpoint_failure_number(), 2)
def test_get_set_externalized_checkpoints_cleanup(self):
self.assertFalse(self.checkpoint_config.is_externalized_checkpoints_enabled())
self.assertEqual(self.checkpoint_config.get_externalized_checkpoint_cleanup(),
ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS)
self.checkpoint_config.enable_externalized_checkpoints(
ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
self.assertTrue(self.checkpoint_config.is_externalized_checkpoints_enabled())
self.assertEqual(self.checkpoint_config.get_externalized_checkpoint_cleanup(),
ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
self.checkpoint_config.enable_externalized_checkpoints(
ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION)
self.assertEqual(self.checkpoint_config.get_externalized_checkpoint_cleanup(),
ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION)
def test_is_unaligned_checkpointing_enabled(self):
self.assertFalse(self.checkpoint_config.is_unaligned_checkpoints_enabled())
self.assertFalse(self.checkpoint_config.is_force_unaligned_checkpoints())
self.assertEqual(self.checkpoint_config.get_alignment_timeout(), Duration.of_millis(0))
self.checkpoint_config.enable_unaligned_checkpoints()
self.assertTrue(self.checkpoint_config.is_unaligned_checkpoints_enabled())
self.checkpoint_config.disable_unaligned_checkpoints()
self.assertFalse(self.checkpoint_config.is_unaligned_checkpoints_enabled())
self.checkpoint_config.enable_unaligned_checkpoints(True)
self.assertTrue(self.checkpoint_config.is_unaligned_checkpoints_enabled())
self.checkpoint_config.set_force_unaligned_checkpoints(True)
self.assertTrue(self.checkpoint_config.is_force_unaligned_checkpoints())
self.checkpoint_config.set_alignment_timeout(Duration.of_minutes(1))
self.assertEqual(self.checkpoint_config.get_alignment_timeout(), Duration.of_minutes(1))
def test_get_set_checkpoint_storage(self):
self.assertIsNone(self.checkpoint_config.get_checkpoint_storage(),
"Default checkpoint storage should be None")
self.checkpoint_config.set_checkpoint_storage_dir("file://var/checkpoints/")
self.assertEqual(self.checkpoint_config.get_checkpoint_storage().get_checkpoint_path(),
"file://var/checkpoints",
"Wrong checkpoints directory")
| 7,483 | 42.511628 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_data_stream.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import os
import uuid
from collections import defaultdict
from typing import Tuple
from pyflink.common import Row, Configuration
from pyflink.common.time import Time
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import WatermarkStrategy, TimestampAssigner
from pyflink.datastream import (TimeCharacteristic, RuntimeContext, SlotSharingGroup,
StreamExecutionEnvironment, RuntimeExecutionMode)
from pyflink.datastream.data_stream import DataStream
from pyflink.datastream.functions import (AggregateFunction, CoMapFunction, CoFlatMapFunction,
MapFunction, FilterFunction, FlatMapFunction,
KeyedCoProcessFunction, KeyedProcessFunction, KeySelector,
ProcessFunction, ReduceFunction, CoProcessFunction,
BroadcastProcessFunction, KeyedBroadcastProcessFunction)
from pyflink.datastream.output_tag import OutputTag
from pyflink.datastream.state import (ValueStateDescriptor, ListStateDescriptor, MapStateDescriptor,
ReducingStateDescriptor, ReducingState, AggregatingState,
AggregatingStateDescriptor, StateTtlConfig)
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.java_gateway import get_gateway
from pyflink.metrics import Counter, Meter, Distribution
from pyflink.testing.test_case_utils import (PyFlinkBatchTestCase, PyFlinkStreamingTestCase,
PyFlinkTestCase)
from pyflink.util.java_utils import get_j_env_configuration
class DataStreamTests(object):
def setUp(self) -> None:
super(DataStreamTests, self).setUp()
config = get_j_env_configuration(self.env._j_stream_execution_environment)
config.setString("akka.ask.timeout", "20 s")
self.test_sink = DataStreamTestSinkFunction()
def tearDown(self) -> None:
self.test_sink.clear()
def assert_equals_sorted(self, expected, actual):
expected.sort()
actual.sort()
self.assertEqual(expected, actual)
def test_basic_operations(self):
ds = self.env.from_collection(
[('ab', Row('a', decimal.Decimal(1))),
('bdc', Row('b', decimal.Decimal(2))),
('cfgs', Row('c', decimal.Decimal(3))),
('deeefg', Row('d', decimal.Decimal(4)))],
type_info=Types.TUPLE([Types.STRING(), Types.ROW([Types.STRING(), Types.BIG_DEC()])]))
class MyMapFunction(MapFunction):
def map(self, value):
return Row(value[0], value[1] + 1, value[2])
class MyFlatMapFunction(FlatMapFunction):
def flat_map(self, value):
if value[1] % 2 == 0:
yield value
class MyFilterFunction(FilterFunction):
def filter(self, value):
return value[1] > 2
(ds.map(lambda i: (i[0], len(i[0]), i[1][1]),
output_type=Types.TUPLE([Types.STRING(), Types.INT(), Types.BIG_DEC()]))
.flat_map(MyFlatMapFunction(),
output_type=Types.TUPLE([Types.STRING(), Types.INT(), Types.BIG_DEC()]))
.filter(MyFilterFunction())
.map(MyMapFunction(),
output_type=Types.ROW([Types.STRING(), Types.INT(), Types.BIG_DEC()]))
.add_sink(self.test_sink))
self.env.execute('test_basic_operations')
results = self.test_sink.get_results()
expected = ["+I[cfgs, 5, 3]",
"+I[deeefg, 7, 4]"]
self.assert_equals_sorted(expected, results)
def test_partition_custom(self):
ds = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1), ('e', 2),
('f', 7), ('g', 7), ('h', 8), ('i', 8), ('j', 9)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
expected_num_partitions = 5
def my_partitioner(key, num_partitions):
assert expected_num_partitions == num_partitions
return key % num_partitions
partitioned_stream = ds.map(lambda x: x, output_type=Types.ROW([Types.STRING(),
Types.INT()]))\
.set_parallelism(4).partition_custom(my_partitioner, lambda x: x[1])
JPartitionCustomTestMapFunction = get_gateway().jvm\
.org.apache.flink.python.util.PartitionCustomTestMapFunction
test_map_stream = DataStream(partitioned_stream
._j_data_stream.map(JPartitionCustomTestMapFunction()))
test_map_stream.set_parallelism(expected_num_partitions).add_sink(self.test_sink)
self.env.execute('test_partition_custom')
def test_keyed_process_function_with_state(self):
self.env.get_config().set_auto_watermark_interval(2000)
self.env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
data_stream = self.env.from_collection([(1, 'hi', '1603708211000'),
(2, 'hello', '1603708224000'),
(3, 'hi', '1603708226000'),
(4, 'hello', '1603708289000'),
(5, 'hi', '1603708291000'),
(6, 'hello', '1603708293000')],
type_info=Types.ROW([Types.INT(), Types.STRING(),
Types.STRING()]))
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[2])
class MyProcessFunction(KeyedProcessFunction):
def __init__(self):
self.value_state = None
self.list_state = None
self.map_state = None
def open(self, runtime_context: RuntimeContext):
value_state_descriptor = ValueStateDescriptor('value_state', Types.INT())
self.value_state = runtime_context.get_state(value_state_descriptor)
list_state_descriptor = ListStateDescriptor('list_state', Types.INT())
self.list_state = runtime_context.get_list_state(list_state_descriptor)
map_state_descriptor = MapStateDescriptor('map_state', Types.INT(), Types.STRING())
state_ttl_config = StateTtlConfig \
.new_builder(Time.seconds(1)) \
.set_update_type(StateTtlConfig.UpdateType.OnReadAndWrite) \
.set_state_visibility(
StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp) \
.disable_cleanup_in_background() \
.build()
map_state_descriptor.enable_time_to_live(state_ttl_config)
self.map_state = runtime_context.get_map_state(map_state_descriptor)
def process_element(self, value, ctx):
current_value = self.value_state.value()
self.value_state.update(value[0])
current_list = [_ for _ in self.list_state.get()]
self.list_state.add(value[0])
map_entries = {k: v for k, v in self.map_state.items()}
keys = sorted(map_entries.keys())
map_entries_string = [str(k) + ': ' + str(map_entries[k]) for k in keys]
map_entries_string = '{' + ', '.join(map_entries_string) + '}'
self.map_state.put(value[0], value[1])
current_key = ctx.get_current_key()
yield "current key: {}, current value state: {}, current list state: {}, " \
"current map state: {}, current value: {}".format(str(current_key),
str(current_value),
str(current_list),
map_entries_string,
str(value))
def on_timer(self, timestamp, ctx):
pass
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(MyTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[1], key_type=Types.STRING()) \
.process(MyProcessFunction(), output_type=Types.STRING()) \
.add_sink(self.test_sink)
self.env.execute('test time stamp assigner with keyed process function')
results = self.test_sink.get_results()
expected = ["current key: hi, current value state: None, current list state: [], "
"current map state: {}, current value: Row(f0=1, f1='hi', "
"f2='1603708211000')",
"current key: hello, current value state: None, "
"current list state: [], current map state: {}, current value: Row(f0=2,"
" f1='hello', f2='1603708224000')",
"current key: hi, current value state: 1, current list state: [1], "
"current map state: {1: hi}, current value: Row(f0=3, f1='hi', "
"f2='1603708226000')",
"current key: hello, current value state: 2, current list state: [2], "
"current map state: {2: hello}, current value: Row(f0=4, f1='hello', "
"f2='1603708289000')",
"current key: hi, current value state: 3, current list state: [1, 3], "
"current map state: {1: hi, 3: hi}, current value: Row(f0=5, f1='hi', "
"f2='1603708291000')",
"current key: hello, current value state: 4, current list state: [2, 4],"
" current map state: {2: hello, 4: hello}, current value: Row(f0=6, "
"f1='hello', f2='1603708293000')"]
self.assert_equals_sorted(expected, results)
def test_reducing_state(self):
self.env.set_parallelism(2)
data_stream = self.env.from_collection([
(1, 'hi'), (2, 'hello'), (3, 'hi'), (4, 'hello'), (5, 'hi'), (6, 'hello')],
type_info=Types.TUPLE([Types.INT(), Types.STRING()]))
class MyProcessFunction(KeyedProcessFunction):
def __init__(self):
self.reducing_state = None # type: ReducingState
def open(self, runtime_context: RuntimeContext):
self.reducing_state = runtime_context.get_reducing_state(
ReducingStateDescriptor(
'reducing_state', lambda i, i2: i + i2, Types.INT()))
def process_element(self, value, ctx):
self.reducing_state.add(value[0])
yield self.reducing_state.get(), value[1]
data_stream.key_by(lambda x: x[1], key_type=Types.STRING()) \
.process(MyProcessFunction(), output_type=Types.TUPLE([Types.INT(), Types.STRING()])) \
.add_sink(self.test_sink)
self.env.execute('test_reducing_state')
result = self.test_sink.get_results()
expected_result = ['(1,hi)', '(2,hello)', '(4,hi)', '(6,hello)', '(9,hi)', '(12,hello)']
result.sort()
expected_result.sort()
self.assertEqual(expected_result, result)
def test_aggregating_state(self):
self.env.set_parallelism(2)
data_stream = self.env.from_collection([
(1, 'hi'), (2, 'hello'), (3, 'hi'), (4, 'hello'), (5, 'hi'), (6, 'hello')],
type_info=Types.TUPLE([Types.INT(), Types.STRING()]))
class MyAggregateFunction(AggregateFunction):
def create_accumulator(self):
return 0
def add(self, value, accumulator):
return value + accumulator
def get_result(self, accumulator):
return accumulator
def merge(self, acc_a, acc_b):
return acc_a + acc_b
class MyProcessFunction(KeyedProcessFunction):
def __init__(self):
self.aggregating_state = None # type: AggregatingState
def open(self, runtime_context: RuntimeContext):
descriptor = AggregatingStateDescriptor(
'aggregating_state', MyAggregateFunction(), Types.INT())
state_ttl_config = StateTtlConfig \
.new_builder(Time.seconds(1)) \
.set_update_type(StateTtlConfig.UpdateType.OnReadAndWrite) \
.disable_cleanup_in_background() \
.build()
descriptor.enable_time_to_live(state_ttl_config)
self.aggregating_state = runtime_context.get_aggregating_state(descriptor)
def process_element(self, value, ctx):
self.aggregating_state.add(value[0])
yield self.aggregating_state.get(), value[1]
config = Configuration(
j_configuration=get_j_env_configuration(self.env._j_stream_execution_environment))
config.set_integer("python.fn-execution.bundle.size", 1)
data_stream.key_by(lambda x: x[1], key_type=Types.STRING()) \
.process(MyProcessFunction(), output_type=Types.TUPLE([Types.INT(), Types.STRING()])) \
.add_sink(self.test_sink)
self.env.execute('test_aggregating_state')
results = self.test_sink.get_results()
expected = ['(1,hi)', '(2,hello)', '(4,hi)', '(6,hello)', '(9,hi)', '(12,hello)']
self.assert_equals_sorted(expected, results)
def test_basic_co_operations(self):
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_stream_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
class MyCoFlatMapFunction(CoFlatMapFunction):
def flat_map1(self, value):
yield value + 1
def flat_map2(self, value):
yield value - 1
class MyCoMapFunction(CoMapFunction):
def map1(self, value):
from test_stream_dependency_manage_lib import add_two
return add_two(value)
def map2(self, value):
return value + 1
self.env.add_python_file(python_file_path)
ds_1 = self.env.from_collection([1, 2, 3, 4, 5])
ds_2 = ds_1.map(lambda x: x * 2)
(ds_1.connect(ds_2).flat_map(MyCoFlatMapFunction())
.connect(ds_2).map(MyCoMapFunction())
.add_sink(self.test_sink))
self.env.execute("test_basic_co_operations")
results = self.test_sink.get_results(True)
expected = ['4', '5', '6', '7', '8', '3', '5', '7', '9', '11', '3', '5', '7', '9', '11']
self.assert_equals_sorted(expected, results)
def test_keyed_co_process(self):
self.env.set_parallelism(1)
ds1 = self.env.from_collection([("a", 1), ("b", 2), ("c", 3)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds2 = self.env.from_collection([("b", 2), ("c", 3), ("d", 4)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds1 = ds1.assign_timestamps_and_watermarks(
WatermarkStrategy.for_monotonous_timestamps().with_timestamp_assigner(
SecondColumnTimestampAssigner()))
ds2 = ds2.assign_timestamps_and_watermarks(
WatermarkStrategy.for_monotonous_timestamps().with_timestamp_assigner(
SecondColumnTimestampAssigner()))
ds1.connect(ds2) \
.key_by(lambda x: x[0], lambda x: x[0]) \
.process(MyKeyedCoProcessFunction()) \
.map(lambda x: Row(x[0], x[1] + 1)) \
.add_sink(self.test_sink)
self.env.execute('test_keyed_co_process_function')
results = self.test_sink.get_results(True)
expected = ["<Row('a', 2)>",
"<Row('b', 2)>",
"<Row('b', 3)>",
"<Row('c', 2)>",
"<Row('c', 3)>",
"<Row('d', 2)>",
"<Row('on_timer', 4)>"]
self.assert_equals_sorted(expected, results)
def test_co_broadcast_process(self):
ds = self.env.from_collection([1, 2, 3, 4, 5], type_info=Types.INT()) # type: DataStream
ds_broadcast = self.env.from_collection(
[(0, "a"), (1, "b")], type_info=Types.TUPLE([Types.INT(), Types.STRING()])
) # type: DataStream
class MyBroadcastProcessFunction(BroadcastProcessFunction):
def __init__(self, map_state_desc):
self._map_state_desc = map_state_desc
self._cache = defaultdict(list)
def process_element(self, value: int, ctx: BroadcastProcessFunction.ReadOnlyContext):
ro_broadcast_state = ctx.get_broadcast_state(self._map_state_desc)
key = value % 2
if ro_broadcast_state.contains(key):
if self._cache.get(key) is not None:
for v in self._cache[key]:
yield ro_broadcast_state.get(key) + str(v)
self._cache[key].clear()
yield ro_broadcast_state.get(key) + str(value)
else:
self._cache[key].append(value)
def process_broadcast_element(
self, value: Tuple[int, str], ctx: BroadcastProcessFunction.Context
):
key = value[0]
yield str(key) + value[1]
broadcast_state = ctx.get_broadcast_state(self._map_state_desc)
broadcast_state.put(key, value[1])
if self._cache.get(key) is not None:
for v in self._cache[key]:
yield value[1] + str(v)
self._cache[key].clear()
map_state_desc = MapStateDescriptor(
"mapping", key_type_info=Types.INT(), value_type_info=Types.STRING()
)
ds.connect(ds_broadcast.broadcast(map_state_desc)).process(
MyBroadcastProcessFunction(map_state_desc), output_type=Types.STRING()
).add_sink(self.test_sink)
self.env.execute("test_co_broadcast_process")
expected = ["0a", "0a", "1b", "1b", "a2", "a4", "b1", "b3", "b5"]
self.assert_equals_sorted(expected, self.test_sink.get_results())
def test_keyed_co_broadcast_process(self):
ds = self.env.from_collection(
[(1, '1603708211000'),
(2, '1603708212000'),
(3, '1603708213000'),
(4, '1603708214000')],
type_info=Types.ROW([Types.INT(), Types.STRING()])) # type: DataStream
ds_broadcast = self.env.from_collection(
[(0, '1603708215000', 'a'),
(1, '1603708215000', 'b')],
type_info=Types.ROW([Types.INT(), Types.STRING(), Types.STRING()])
) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
ds = ds.assign_timestamps_and_watermarks(watermark_strategy)
ds_broadcast = ds_broadcast.assign_timestamps_and_watermarks(watermark_strategy)
def _create_string(s, t):
return 'value: {}, ts: {}'.format(s, t)
class MyKeyedBroadcastProcessFunction(KeyedBroadcastProcessFunction):
def __init__(self, map_state_desc):
self._map_state_desc = map_state_desc
self._cache = None
def open(self, runtime_context: RuntimeContext):
self._cache = defaultdict(list)
def process_element(
self, value: Tuple[int, str], ctx: KeyedBroadcastProcessFunction.ReadOnlyContext
):
ro_broadcast_state = ctx.get_broadcast_state(self._map_state_desc)
key = value[0] % 2
if ro_broadcast_state.contains(key):
if self._cache.get(key) is not None:
for v in self._cache[key]:
yield _create_string(ro_broadcast_state.get(key) + str(v[0]), v[1])
self._cache[key].clear()
yield _create_string(ro_broadcast_state.get(key) + str(value[0]), value[1])
else:
self._cache[key].append(value)
ctx.timer_service().register_event_time_timer(ctx.timestamp() + 10000)
def process_broadcast_element(
self, value: Tuple[int, str, str], ctx: KeyedBroadcastProcessFunction.Context
):
key = value[0]
yield _create_string(str(key) + value[2], ctx.timestamp())
broadcast_state = ctx.get_broadcast_state(self._map_state_desc)
broadcast_state.put(key, value[2])
if self._cache.get(key) is not None:
for v in self._cache[key]:
yield _create_string(value[2] + str(v[0]), v[1])
self._cache[key].clear()
def on_timer(self, timestamp: int, ctx: KeyedBroadcastProcessFunction.OnTimerContext):
yield _create_string(ctx.get_current_key(), timestamp)
map_state_desc = MapStateDescriptor(
"mapping", key_type_info=Types.INT(), value_type_info=Types.STRING()
)
ds.key_by(lambda t: t[0]).connect(ds_broadcast.broadcast(map_state_desc)).process(
MyKeyedBroadcastProcessFunction(map_state_desc), output_type=Types.STRING()
).add_sink(self.test_sink)
self.env.execute("test_keyed_co_broadcast_process")
expected = [
'value: 0a, ts: 1603708215000',
'value: 0a, ts: 1603708215000',
'value: 1, ts: 1603708221000',
'value: 1b, ts: 1603708215000',
'value: 1b, ts: 1603708215000',
'value: 2, ts: 1603708222000',
'value: 3, ts: 1603708223000',
'value: 4, ts: 1603708224000',
'value: a2, ts: 1603708212000',
'value: a4, ts: 1603708214000',
'value: b1, ts: 1603708211000',
'value: b3, ts: 1603708213000'
]
self.assert_equals_sorted(expected, self.test_sink.get_results())
def test_process_side_output(self):
tag = OutputTag("side", Types.INT())
ds = self.env.from_collection([('a', 0), ('b', 1), ('c', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
class MyProcessFunction(ProcessFunction):
def process_element(self, value, ctx: 'ProcessFunction.Context'):
yield value[0]
yield tag, value[1]
ds2 = ds.process(MyProcessFunction(), output_type=Types.STRING())
main_sink = DataStreamTestSinkFunction()
ds2.add_sink(main_sink)
side_sink = DataStreamTestSinkFunction()
ds2.get_side_output(tag).add_sink(side_sink)
self.env.execute("test_process_side_output")
main_expected = ['a', 'b', 'c']
self.assert_equals_sorted(main_expected, main_sink.get_results())
side_expected = ['0', '1', '2']
self.assert_equals_sorted(side_expected, side_sink.get_results())
def test_side_output_chained_with_upstream_operator(self):
tag = OutputTag("side", Types.INT())
ds = self.env.from_collection([('a', 0), ('b', 1), ('c', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
class MyProcessFunction(ProcessFunction):
def process_element(self, value, ctx: 'ProcessFunction.Context'):
yield value[0]
yield tag, value[1]
ds2 = ds.map(lambda e: (e[0], e[1]+1)) \
.process(MyProcessFunction(), output_type=Types.STRING())
main_sink = DataStreamTestSinkFunction()
ds2.add_sink(main_sink)
side_sink = DataStreamTestSinkFunction()
ds2.get_side_output(tag).add_sink(side_sink)
self.env.execute("test_side_output_chained_with_upstream_operator")
main_expected = ['a', 'b', 'c']
self.assert_equals_sorted(main_expected, main_sink.get_results())
side_expected = ['1', '2', '3']
self.assert_equals_sorted(side_expected, side_sink.get_results())
def test_process_multiple_side_output(self):
tag1 = OutputTag("side1", Types.INT())
tag2 = OutputTag("side2", Types.STRING())
ds = self.env.from_collection([('a', 0), ('b', 1), ('c', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
class MyProcessFunction(ProcessFunction):
def process_element(self, value, ctx: 'ProcessFunction.Context'):
yield value[0]
yield tag1, value[1]
yield tag2, value[0] + str(value[1])
ds2 = ds.process(MyProcessFunction(), output_type=Types.STRING())
main_sink = DataStreamTestSinkFunction()
ds2.add_sink(main_sink)
side1_sink = DataStreamTestSinkFunction()
ds2.get_side_output(tag1).add_sink(side1_sink)
side2_sink = DataStreamTestSinkFunction()
ds2.get_side_output(tag2).add_sink(side2_sink)
self.env.execute("test_process_multiple_side_output")
main_expected = ['a', 'b', 'c']
self.assert_equals_sorted(main_expected, main_sink.get_results())
side1_expected = ['0', '1', '2']
self.assert_equals_sorted(side1_expected, side1_sink.get_results())
side2_expected = ['a0', 'b1', 'c2']
self.assert_equals_sorted(side2_expected, side2_sink.get_results())
def test_co_process_side_output(self):
tag = OutputTag("side", Types.INT())
class MyCoProcessFunction(CoProcessFunction):
def process_element1(self, value, ctx: 'CoProcessFunction.Context'):
yield value[0]
yield tag, value[1]
def process_element2(self, value, ctx: 'CoProcessFunction.Context'):
yield value[1]
yield tag, value[0]
ds1 = self.env.from_collection([('a', 0), ('b', 1), ('c', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds2 = self.env.from_collection([(3, 'c'), (1, 'a'), (0, 'd')],
type_info=Types.ROW([Types.INT(), Types.STRING()]))
ds3 = ds1.connect(ds2).process(MyCoProcessFunction(), output_type=Types.STRING())
ds3.add_sink(self.test_sink)
side_sink = DataStreamTestSinkFunction()
ds3.get_side_output(tag).add_sink(side_sink)
self.env.execute("test_co_process_side_output")
main_expected = ['a', 'a', 'b', 'c', 'c', 'd']
self.assert_equals_sorted(main_expected, self.test_sink.get_results())
side_expected = ['0', '0', '1', '1', '2', '3']
self.assert_equals_sorted(side_expected, side_sink.get_results())
def test_co_broadcast_side_output(self):
tag = OutputTag("side", Types.INT())
class MyBroadcastProcessFunction(BroadcastProcessFunction):
def process_element(self, value, ctx):
yield value[0]
yield tag, value[1]
def process_broadcast_element(self, value, ctx):
yield value[1]
yield tag, value[0]
self.env.set_parallelism(2)
ds = self.env.from_collection([('a', 0), ('b', 1), ('c', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds_broadcast = self.env.from_collection([(3, 'd'), (4, 'f')],
type_info=Types.ROW([Types.INT(), Types.STRING()]))
map_state_desc = MapStateDescriptor(
"dummy", key_type_info=Types.INT(), value_type_info=Types.STRING()
)
ds = ds.connect(ds_broadcast.broadcast(map_state_desc)).process(
MyBroadcastProcessFunction(), output_type=Types.STRING()
)
side_sink = DataStreamTestSinkFunction()
ds.get_side_output(tag).add_sink(side_sink)
ds.add_sink(self.test_sink)
self.env.execute("test_co_broadcast_process_side_output")
main_expected = ['a', 'b', 'c', 'd', 'd', 'f', 'f']
self.assert_equals_sorted(main_expected, self.test_sink.get_results())
side_expected = ['0', '1', '2', '3', '3', '4', '4']
self.assert_equals_sorted(side_expected, side_sink.get_results())
def test_keyed_process_side_output(self):
tag = OutputTag("side", Types.INT())
ds = self.env.from_collection([('a', 1), ('b', 2), ('a', 3), ('b', 4)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
class MyKeyedProcessFunction(KeyedProcessFunction):
def __init__(self):
self.reducing_state = None # type: ReducingState
def open(self, context: RuntimeContext):
self.reducing_state = context.get_reducing_state(
ReducingStateDescriptor("reduce", lambda i, j: i+j, Types.INT())
)
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
yield value[1]
self.reducing_state.add(value[1])
yield tag, self.reducing_state.get()
ds2 = ds.key_by(lambda e: e[0]).process(MyKeyedProcessFunction(),
output_type=Types.INT())
main_sink = DataStreamTestSinkFunction()
ds2.add_sink(main_sink)
side_sink = DataStreamTestSinkFunction()
ds2.get_side_output(tag).add_sink(side_sink)
self.env.execute("test_keyed_process_side_output")
main_expected = ['1', '2', '3', '4']
self.assert_equals_sorted(main_expected, main_sink.get_results())
side_expected = ['1', '2', '4', '6']
self.assert_equals_sorted(side_expected, side_sink.get_results())
def test_keyed_co_process_side_output(self):
tag = OutputTag("side", Types.INT())
ds1 = self.env.from_collection([('a', 1), ('b', 2), ('a', 3), ('b', 4)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds2 = self.env.from_collection([(8, 'a'), (7, 'b'), (6, 'a'), (5, 'b')],
type_info=Types.ROW([Types.INT(), Types.STRING()]))
class MyKeyedCoProcessFunction(KeyedCoProcessFunction):
def __init__(self):
self.reducing_state = None # type: ReducingState
def open(self, context: RuntimeContext):
self.reducing_state = context.get_reducing_state(
ReducingStateDescriptor("reduce", lambda i, j: i+j, Types.INT())
)
def process_element1(self, value, ctx: 'KeyedCoProcessFunction.Context'):
yield ctx.get_current_key(), value[1]
self.reducing_state.add(1)
yield tag, self.reducing_state.get()
def process_element2(self, value, ctx: 'KeyedCoProcessFunction.Context'):
yield ctx.get_current_key(), value[0]
self.reducing_state.add(1)
yield tag, self.reducing_state.get()
ds3 = ds1.key_by(lambda e: e[0])\
.connect(ds2.key_by(lambda e: e[1]))\
.process(MyKeyedCoProcessFunction(),
output_type=Types.TUPLE([Types.STRING(), Types.INT()]))
main_sink = DataStreamTestSinkFunction()
ds3.add_sink(main_sink)
side_sink = DataStreamTestSinkFunction()
ds3.get_side_output(tag).add_sink(side_sink)
self.env.execute("test_keyed_co_process_side_output")
main_expected = ['(a,1)', '(b,2)', '(a,3)', '(b,4)', '(b,5)', '(a,6)', '(b,7)', '(a,8)']
self.assert_equals_sorted(main_expected, main_sink.get_results())
side_expected = ['1', '1', '2', '2', '3', '3', '4', '4']
self.assert_equals_sorted(side_expected, side_sink.get_results())
def test_keyed_co_broadcast_side_output(self):
tag = OutputTag("side", Types.INT())
class MyKeyedBroadcastProcessFunction(KeyedBroadcastProcessFunction):
def __init__(self):
self.reducing_state = None # type: ReducingState
def open(self, context: RuntimeContext):
self.reducing_state = context.get_reducing_state(
ReducingStateDescriptor("reduce", lambda i, j: i+j, Types.INT())
)
def process_element(self, value, ctx):
self.reducing_state.add(value[1])
yield value[0]
yield tag, self.reducing_state.get()
def process_broadcast_element(self, value, ctx):
yield value[1]
yield tag, value[0]
self.env.set_parallelism(2)
ds = self.env.from_collection([('a', 0), ('b', 1), ('a', 2), ('b', 3)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds_broadcast = self.env.from_collection([(5, 'c'), (6, 'd')],
type_info=Types.ROW([Types.INT(), Types.STRING()]))
map_state_desc = MapStateDescriptor(
"dummy", key_type_info=Types.INT(), value_type_info=Types.STRING()
)
ds = ds.key_by(lambda e: e[0]).connect(ds_broadcast.broadcast(map_state_desc)).process(
MyKeyedBroadcastProcessFunction(), output_type=Types.STRING()
)
side_sink = DataStreamTestSinkFunction()
ds.get_side_output(tag).add_sink(side_sink)
ds.add_sink(self.test_sink)
self.env.execute("test_keyed_co_broadcast_process_side_output")
main_expected = ['a', 'a', 'b', 'b', 'c', 'c', 'd', 'd']
self.assert_equals_sorted(main_expected, self.test_sink.get_results())
side_expected = ['0', '1', '2', '4', '5', '5', '6', '6']
self.assert_equals_sorted(side_expected, side_sink.get_results())
def test_side_output_stream_execute_and_collect(self):
tag = OutputTag("side", Types.INT())
class MyProcessFunction(ProcessFunction):
def process_element(self, value, ctx):
yield value
yield tag, value * 2
ds = self.env.from_collection([1, 2, 3], Types.INT()).process(MyProcessFunction())
ds_side = ds.get_side_output(tag)
result = [i for i in ds_side.execute_and_collect()]
expected = [2, 4, 6]
self.assert_equals_sorted(expected, result)
def test_side_output_tag_reusing(self):
tag = OutputTag("side", Types.INT())
class MyProcessFunction(ProcessFunction):
def process_element(self, value, ctx):
yield value
yield tag, value * 2
side1_sink = DataStreamTestSinkFunction()
ds = self.env.from_collection([1, 2, 3], Types.INT()).process(MyProcessFunction())
ds.get_side_output(tag).add_sink(side1_sink)
side2_sink = DataStreamTestSinkFunction()
ds.map(lambda i: i*2).process(MyProcessFunction()).get_side_output(tag).add_sink(side2_sink)
self.env.execute("test_side_output_tag_reusing")
result1 = [i for i in side1_sink.get_results(stringify=False)]
result2 = [i for i in side2_sink.get_results(stringify=False)]
self.assert_equals_sorted(['2', '4', '6'], result1)
self.assert_equals_sorted(['4', '8', '12'], result2)
class DataStreamStreamingTests(DataStreamTests):
def test_reduce_with_state(self):
ds = self.env.from_collection([('a', 0), ('c', 1), ('d', 1), ('b', 0), ('e', 1)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
keyed_stream = ds.key_by(MyKeySelector(), key_type=Types.INT())
with self.assertRaises(Exception):
keyed_stream.name("keyed stream")
keyed_stream.reduce(MyReduceFunction()).add_sink(self.test_sink)
self.env.execute('key_by_test')
results = self.test_sink.get_results(False)
expected = ['+I[a, 0]', '+I[ab, 0]', '+I[c, 1]', '+I[cd, 1]', '+I[cde, 1]']
self.assert_equals_sorted(expected, results)
class DataStreamBatchTests(DataStreamTests):
def test_reduce_with_state(self):
ds = self.env.from_collection([('a', 0), ('c', 1), ('d', 1), ('b', 0), ('e', 1)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
keyed_stream = ds.key_by(MyKeySelector(), key_type=Types.INT())
with self.assertRaises(Exception):
keyed_stream.name("keyed stream")
keyed_stream.reduce(MyReduceFunction()).add_sink(self.test_sink)
self.env.execute('key_by_test')
results = self.test_sink.get_results(False)
expected = ['+I[ab, 0]', '+I[cde, 1]']
self.assert_equals_sorted(expected, results)
class ProcessDataStreamTests(DataStreamTests):
"""
The tests only tested in Process Mode.
"""
def test_basic_co_operations_with_output_type(self):
class MyCoMapFunction(CoMapFunction):
def map1(self, value):
return value + 2
def map2(self, value):
return value + 1
class MyCoFlatMapFunction(CoFlatMapFunction):
def flat_map1(self, value):
yield value + 1
def flat_map2(self, value):
yield value - 1
ds_1 = self.env.from_collection([1, 2, 3, 4, 5])
ds_2 = ds_1.map(lambda x: x * 2)
(ds_1.connect(ds_2).flat_map(MyCoFlatMapFunction(), output_type=Types.INT())
.connect(ds_2).map(MyCoMapFunction(), output_type=Types.INT())
.add_sink(self.test_sink))
self.env.execute("test_basic_co_operations_with_output_type")
results = self.test_sink.get_results()
expected = ['4', '5', '6', '7', '8', '3', '5', '7', '9', '11', '3', '5', '7', '9', '11']
self.assert_equals_sorted(expected, results)
def test_keyed_co_map(self):
ds1 = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1), ('e', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()])) \
.key_by(MyKeySelector(), key_type=Types.INT())
ds2 = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1), ('e', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
class AssertKeyCoMapFunction(CoMapFunction):
def __init__(self):
self.pre1 = None
self.pre2 = None
def open(self, runtime_context: RuntimeContext):
self.pre1 = runtime_context.get_state(
ValueStateDescriptor("pre1", Types.STRING()))
self.pre2 = runtime_context.get_state(
ValueStateDescriptor("pre2", Types.STRING()))
def map1(self, value):
if value[0] == 'b':
assert self.pre1.value() == 'a'
if value[0] == 'd':
assert self.pre1.value() == 'c'
self.pre1.update(value[0])
return value
def map2(self, value):
if value[0] == 'b':
assert self.pre2.value() == 'a'
if value[0] == 'd':
assert self.pre2.value() == 'c'
self.pre2.update(value[0])
return value
ds1.connect(ds2)\
.key_by(MyKeySelector(), MyKeySelector(), key_type=Types.INT())\
.map(AssertKeyCoMapFunction())\
.map(lambda x: (x[0], x[1] + 1)) \
.add_sink(self.test_sink)
self.env.execute()
results = self.test_sink.get_results(True)
expected = ["('e', 3)", "('a', 1)", "('b', 1)", "('c', 2)", "('d', 2)", "('e', 3)",
"('a', 1)", "('b', 1)", "('c', 2)", "('d', 2)"]
self.assert_equals_sorted(expected, results)
def test_keyed_co_flat_map(self):
ds1 = self.env.from_collection([(1, 1), (2, 2), (3, 3)],
type_info=Types.ROW([Types.INT(), Types.INT()]))
ds2 = self.env.from_collection([("a", "a"), ("b", "b"), ("c", "c"), ("a", "a")],
type_info=Types.ROW([Types.STRING(), Types.STRING()]))
ds1.connect(ds2).key_by(lambda x: 1, lambda x: 1) \
.flat_map(MyRichCoFlatMapFunction(), output_type=Types.STRING()) \
.filter(lambda x: x != '4') \
.add_sink(self.test_sink)
self.env.execute('test_keyed_co_flat_map')
results = self.test_sink.get_results(False)
expected = ['2', '2', '3', '3', 'a', 'b', 'c']
self.assert_equals_sorted(expected, results)
def test_keyed_map(self):
from pyflink.util.java_utils import get_j_env_configuration
from pyflink.common import Configuration
config = Configuration(
j_configuration=get_j_env_configuration(self.env._j_stream_execution_environment))
config.set_integer("python.fn-execution.bundle.size", 1)
ds = self.env.from_collection([('a', 0), ('b', 1), ('c', 0), ('d', 1), ('e', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
keyed_stream = ds.key_by(MyKeySelector(), key_type=Types.INT())
with self.assertRaises(Exception):
keyed_stream.name("keyed stream")
class AssertKeyMapFunction(MapFunction):
def __init__(self):
self.state = None
def open(self, runtime_context: RuntimeContext):
self.state = runtime_context.get_state(
ValueStateDescriptor("test_state", Types.INT()))
def map(self, value):
if value[0] == 'a':
pass
elif value[0] == 'b':
state_value = self._get_state_value()
assert state_value == 1
self.state.update(state_value)
elif value[0] == 'c':
state_value = self._get_state_value()
assert state_value == 1
self.state.update(state_value)
elif value[0] == 'd':
state_value = self._get_state_value()
assert state_value == 2
self.state.update(state_value)
else:
pass
return value
def _get_state_value(self):
state_value = self.state.value()
if state_value is None:
state_value = 1
else:
state_value += 1
return state_value
keyed_stream.map(AssertKeyMapFunction())\
.map(lambda x: (x[0], x[1] + 1))\
.add_sink(self.test_sink)
self.env.execute('test_keyed_map')
results = self.test_sink.get_results(True)
expected = ["('e', 3)", "('a', 1)", "('b', 2)", "('c', 1)", "('d', 2)"]
self.assert_equals_sorted(expected, results)
def test_keyed_flat_map(self):
ds = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1), ('e', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
keyed_stream = ds.key_by(MyKeySelector(), key_type=Types.INT())
with self.assertRaises(Exception):
keyed_stream.name("keyed stream")
class AssertKeyMapFunction(FlatMapFunction):
def __init__(self):
self.pre = None
self.state = None
def open(self, runtime_context: RuntimeContext):
self.state = runtime_context.get_state(
ValueStateDescriptor("test_state", Types.INT()))
def flat_map(self, value):
state_value = self.state.value()
if state_value is None:
state_value = 1
else:
state_value += 1
if value[0] == 'b':
assert self.pre == 'a'
assert state_value == 2
if value[0] == 'd':
assert self.pre == 'c'
assert state_value == 2
if value[0] == 'e':
assert state_value == 1
self.pre = value[0]
self.state.update(state_value)
yield value
keyed_stream.flat_map(AssertKeyMapFunction())\
.map(lambda x: (x[0], x[1] + 1))\
.add_sink(self.test_sink)
self.env.execute('test_keyed_flat_map')
results = self.test_sink.get_results(True)
expected = ["('e', 3)", "('a', 1)", "('b', 1)", "('c', 2)", "('d', 2)"]
self.assert_equals_sorted(expected, results)
def test_keyed_filter(self):
ds = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1), ('e', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
keyed_stream = ds.key_by(MyKeySelector())
with self.assertRaises(Exception):
keyed_stream.name("keyed stream")
class AssertKeyFilterFunction(FilterFunction):
def __init__(self):
self.pre = None
self.state = None
def open(self, runtime_context: RuntimeContext):
self.state = runtime_context.get_state(
ValueStateDescriptor("test_state", Types.INT()))
def filter(self, value):
state_value = self.state.value()
if state_value is None:
state_value = 1
else:
state_value += 1
if value[0] == 'b':
assert self.pre == 'a'
assert state_value == 2
return False
if value[0] == 'd':
assert self.pre == 'c'
assert state_value == 2
return False
if value[0] == 'e':
assert state_value == 1
self.pre = value[0]
self.state.update(state_value)
return True
keyed_stream.filter(AssertKeyFilterFunction())\
.filter(lambda x: x[1] > 0)\
.add_sink(self.test_sink)
self.env.execute('key_by_test')
results = self.test_sink.get_results(False)
expected = ['+I[c, 1]', '+I[e, 2]']
self.assert_equals_sorted(expected, results)
def test_multi_key_by(self):
ds = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1), ('e', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds.key_by(MyKeySelector(), key_type=Types.INT()).key_by(lambda x: x[0])\
.add_sink(self.test_sink)
self.env.execute("test multi key by")
results = self.test_sink.get_results(False)
expected = ['+I[d, 1]', '+I[c, 1]', '+I[a, 0]', '+I[b, 0]', '+I[e, 2]']
self.assert_equals_sorted(expected, results)
def test_collection_type_info(self):
ds = self.env.from_collection([(1, [1.1, 1.2, 1.30], [None, 'hi', 'flink'],
datetime.date(2021, 1, 9), datetime.time(12, 0, 0),
datetime.datetime(2021, 1, 9, 12, 0, 0, 11000),
[1, 2, 3])],
type_info=Types.ROW([Types.INT(),
Types.PRIMITIVE_ARRAY(Types.FLOAT()),
Types.BASIC_ARRAY(Types.STRING()),
Types.SQL_DATE(), Types.SQL_TIME(),
Types.SQL_TIMESTAMP(),
Types.LIST(Types.INT())]))
ds.map(lambda x: x, output_type=Types.ROW([Types.INT(),
Types.PRIMITIVE_ARRAY(Types.FLOAT()),
Types.BASIC_ARRAY(Types.STRING()),
Types.SQL_DATE(), Types.SQL_TIME(),
Types.SQL_TIMESTAMP(),
Types.LIST(Types.INT())])) \
.add_sink(self.test_sink)
self.env.execute("test_collection_type_info")
results = self.test_sink.get_results()
expected = ["+I[1, [1.1, 1.2, 1.3], [null, hi, flink], 2021-01-09, 12:00:00,"
" 2021-01-09 12:00:00.011, [1, 2, 3]]"]
self.assert_equals_sorted(expected, results)
def test_process_function(self):
self.env.set_parallelism(1)
self.env.get_config().set_auto_watermark_interval(2000)
self.env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
data_stream = self.env.from_collection([(1, '1603708211000'),
(2, '1603708224000'),
(3, '1603708226000'),
(4, '1603708289000')],
type_info=Types.ROW([Types.INT(), Types.STRING()]))
class MyProcessFunction(ProcessFunction):
def process_element(self, value, ctx):
current_timestamp = ctx.timestamp()
yield "current timestamp: {}, current_value: {}"\
.format(str(current_timestamp), str(value))
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps()\
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy)\
.process(MyProcessFunction(), output_type=Types.STRING()).add_sink(self.test_sink)
self.env.execute('test process function')
results = self.test_sink.get_results()
expected = ["current timestamp: 1603708211000, "
"current_value: Row(f0=1, f1='1603708211000')",
"current timestamp: 1603708224000, "
"current_value: Row(f0=2, f1='1603708224000')",
"current timestamp: 1603708226000, "
"current_value: Row(f0=3, f1='1603708226000')",
"current timestamp: 1603708289000, "
"current_value: Row(f0=4, f1='1603708289000')"]
self.assert_equals_sorted(expected, results)
class ProcessDataStreamStreamingTests(DataStreamStreamingTests, ProcessDataStreamTests,
PyFlinkStreamingTestCase):
def test_keyed_sum(self):
self.env.set_parallelism(1)
ds = self.env.from_collection(
[(1, 1), (1, 2), (1, 3), (2, 5), (2, 1)],
type_info=Types.ROW_NAMED(["v1", "v2"], [Types.INT(), Types.INT()])
)
ds.key_by(lambda x: x[0]) \
.sum("v2") \
.key_by(lambda x: x[0]) \
.sum(1) \
.map(lambda x: (x[1], x[0]), output_type=Types.TUPLE([Types.INT(), Types.INT()])) \
.key_by(lambda x: x[1]) \
.sum() \
.add_sink(self.test_sink)
self.env.execute("key_by_sum_test_stream")
results = self.test_sink.get_results(False)
expected = ['(1,1)', '(5,1)', '(15,1)', '(5,2)', '(16,2)']
self.assert_equals_sorted(expected, results)
def test_keyed_min_by_and_max(self):
self.env.set_parallelism(1)
ds = self.env.from_collection([('a', 3, 0), ('a', 1, 1), ('b', 5, 0), ('b', 3, 1)],
type_info=Types.ROW_NAMED(
["v1", "v2", "v3"],
[Types.STRING(), Types.INT(), Types.INT()])
)
# 1th operator min_by: ('a', 3, 0), ('a', 1, 1), ('b', 5, 0), ('b', 3, 1)
# 2th operator max_by: ('a', 3, 0), ('a', 3, 0), ('b', 5, 0), ('b', 5, 0)
# 3th operator min_by: ('a', 3, 0), ('a', 3, 0), ('a', 3, 0), ('a', 3, 0)
# 4th operator max_by: ('a', 'a', 'a', 'a')
ds.key_by(lambda x: x[0]) \
.min_by("v2") \
.map(lambda x: (x[0], x[1], x[2]),
output_type=Types.TUPLE([Types.STRING(), Types.INT(), Types.INT()])) \
.key_by(lambda x: x[2]) \
.max(1) \
.key_by(lambda x: x[2]) \
.min() \
.map(lambda x: x[0], output_type=Types.STRING()) \
.key_by(lambda x: x) \
.max_by() \
.add_sink(self.test_sink)
self.env.execute("key_by_min_by_max_by_test_stream")
results = self.test_sink.get_results(False)
expected = ['a', 'a', 'a', 'a']
self.assert_equals_sorted(expected, results)
class ProcessDataStreamBatchTests(DataStreamBatchTests, ProcessDataStreamTests,
PyFlinkBatchTestCase):
def test_keyed_sum(self):
self.env.set_parallelism(1)
ds = self.env.from_collection(
[(1, 1), (1, 2), (1, 3), (5, 1), (5, 5)],
type_info=Types.ROW_NAMED(["v1", "v2"], [Types.INT(), Types.INT()])
)
def flat_map_func1(data):
for i in data:
yield 12, i
def flat_map_func2(data):
for i in data:
yield i
# First sum operator: Test Row type data and pass in field names.
# Second sum operator: Test Row type data and use parameter default value: 0.
# Third sum operator: Test Tuple type data and pass in field index number.
# Fourthly sum operator: Test Number(int) type data.
ds.key_by(lambda x: x[0]) \
.sum("v2") \
.key_by(lambda x: x[1]) \
.sum() \
.flat_map(flat_map_func1, output_type=Types.TUPLE([Types.INT(), Types.INT()])) \
.key_by(lambda x: x[0]) \
.sum(1) \
.flat_map(flat_map_func2, output_type=Types.INT()) \
.key_by(lambda x: x) \
.sum() \
.add_sink(self.test_sink)
self.env.execute("key_by_sum_test_batch")
results = self.test_sink.get_results(False)
expected = ['24']
self.assertEqual(expected, results)
def test_keyed_min_by_and_max(self):
self.env.set_parallelism(1)
ds = self.env.from_collection(
[(1, '9', 0), (1, '5', 1), (1, '6', 2), (5, '5', 0), (5, '3', 1)],
type_info=Types.ROW_NAMED(["v1", "v2", "v3"],
[Types.INT(), Types.STRING(), Types.INT()])
)
def flat_map_func1(data):
for i in data:
yield int(i), 1
def flat_map_func2(data):
for i in data:
yield i
ds.key_by(lambda x: x[0]) \
.min_by("v2") \
.map(lambda x: (x[0], x[1], x[2]),
output_type=Types.TUPLE([Types.INT(), Types.STRING(), Types.INT()])) \
.key_by(lambda x: x[2]) \
.max(0) \
.flat_map(flat_map_func1, output_type=Types.TUPLE([Types.INT(), Types.INT()])) \
.key_by(lambda x: [1]) \
.min_by() \
.flat_map(flat_map_func2, output_type=Types.INT()) \
.key_by(lambda x: x) \
.max_by() \
.add_sink(self.test_sink)
self.env.execute("key_by_min_by_max_by_test_batch")
results = self.test_sink.get_results(False)
expected = ['1']
self.assert_equals_sorted(expected, results)
class EmbeddedDataStreamStreamTests(DataStreamStreamingTests, PyFlinkStreamingTestCase):
def setUp(self):
super(EmbeddedDataStreamStreamTests, self).setUp()
config = get_j_env_configuration(self.env._j_stream_execution_environment)
config.setString("python.execution-mode", "thread")
def test_metrics(self):
ds = self.env.from_collection(
[('ab', 'a', decimal.Decimal(1)),
('bdc', 'a', decimal.Decimal(2)),
('cfgs', 'a', decimal.Decimal(3)),
('deeefg', 'a', decimal.Decimal(4))],
type_info=Types.TUPLE(
[Types.STRING(), Types.STRING(), Types.BIG_DEC()]))
class MyMapFunction(MapFunction):
def __init__(self):
self.counter = None # type: Counter
self.counter_value = 0
self.meter = None # type: Meter
self.meter_value = 0
self.value_to_expose = 0
self.distribution = None # type: Distribution
def open(self, runtime_context: RuntimeContext):
self.counter = runtime_context.get_metrics_group().counter("my_counter")
self.meter = runtime_context.get_metrics_group().meter('my_meter', 1)
runtime_context.get_metrics_group().gauge("my_gauge", lambda: self.value_to_expose)
self.distribution = runtime_context.get_metrics_group().distribution(
"my_distribution")
def map(self, value):
self.counter.inc()
self.counter_value += 1
assert self.counter.get_count() == self.counter_value
self.meter.mark_event(1)
self.meter_value += 1
assert self.meter.get_count() == self.meter_value
self.value_to_expose += 1
self.distribution.update(int(value[2]))
return Row(value[0], len(value[0]), value[2])
(ds.key_by(lambda value: value[1])
.map(MyMapFunction(),
output_type=Types.ROW([Types.STRING(), Types.INT(), Types.BIG_DEC()]))
.add_sink(self.test_sink))
self.env.execute('test_basic_operations')
results = self.test_sink.get_results()
expected = ['+I[ab, 2, 1]', '+I[bdc, 3, 2]', '+I[cfgs, 4, 3]', '+I[deeefg, 6, 4]']
self.assert_equals_sorted(expected, results)
class EmbeddedDataStreamBatchTests(DataStreamBatchTests, PyFlinkBatchTestCase):
def setUp(self):
super(EmbeddedDataStreamBatchTests, self).setUp()
config = get_j_env_configuration(self.env._j_stream_execution_environment)
config.setString("python.execution-mode", "thread")
class CommonDataStreamTests(PyFlinkTestCase):
def setUp(self) -> None:
super(CommonDataStreamTests, self).setUp()
self.env = StreamExecutionEnvironment.get_execution_environment()
self.env.set_parallelism(2)
self.env.set_runtime_mode(RuntimeExecutionMode.STREAMING)
config = get_j_env_configuration(self.env._j_stream_execution_environment)
config.setString("akka.ask.timeout", "20 s")
self.test_sink = DataStreamTestSinkFunction()
def tearDown(self) -> None:
self.test_sink.clear()
def assert_equals_sorted(self, expected, actual):
# otherwise, it may thrown exceptions such as the following:
# TypeError: '<' not supported between instances of 'NoneType' and 'str'
expected.sort(key=lambda x: str(x))
actual.sort(key=lambda x: str(x))
self.assertEqual(expected, actual)
def test_data_stream_name(self):
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')])
test_name = 'test_name'
ds.name(test_name)
self.assertEqual(test_name, ds.get_name())
def test_set_parallelism(self):
parallelism = 3
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')]).map(lambda x: x)
ds.set_parallelism(parallelism).add_sink(self.test_sink)
plan = eval(str(self.env.get_execution_plan()))
self.assertEqual(parallelism, plan['nodes'][1]['parallelism'])
def test_set_max_parallelism(self):
max_parallelism = 4
self.env.set_parallelism(8)
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')]).map(lambda x: x)
ds.set_parallelism(max_parallelism).add_sink(self.test_sink)
plan = eval(str(self.env.get_execution_plan()))
self.assertEqual(max_parallelism, plan['nodes'][1]['parallelism'])
def test_force_non_parallel(self):
self.env.set_parallelism(8)
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')])
ds.force_non_parallel().add_sink(self.test_sink)
plan = eval(str(self.env.get_execution_plan()))
self.assertEqual(1, plan['nodes'][0]['parallelism'])
def test_union(self):
ds_1 = self.env.from_collection([1, 2, 3])
ds_2 = self.env.from_collection([4, 5, 6])
ds_3 = self.env.from_collection([7, 8, 9])
unioned_stream = ds_3.union(ds_1, ds_2)
unioned_stream.map(lambda x: x + 1).add_sink(self.test_sink)
exec_plan = eval(self.env.get_execution_plan())
source_ids = []
union_node_pre_ids = []
for node in exec_plan['nodes']:
if node['pact'] == 'Data Source':
source_ids.append(node['id'])
if node['pact'] == 'Operator':
for pre in node['predecessors']:
union_node_pre_ids.append(pre['id'])
source_ids.sort()
union_node_pre_ids.sort()
self.assertEqual(source_ids, union_node_pre_ids)
def test_keyed_stream_union(self):
ds_1 = self.env.from_collection([1, 2, 3])
ds_2 = self.env.from_collection([4, 5, 6])
unioned_stream = ds_1.key_by(lambda x: x).union(ds_2.key_by(lambda x: x))
unioned_stream.add_sink(self.test_sink)
exec_plan = eval(self.env.get_execution_plan())
expected_union_node_pre_ids = []
union_node_pre_ids = []
for node in exec_plan['nodes']:
if node['type'] == '_keyed_stream_values_operator':
expected_union_node_pre_ids.append(node['id'])
if node['pact'] == 'Data Sink':
for pre in node['predecessors']:
union_node_pre_ids.append(pre['id'])
expected_union_node_pre_ids.sort()
union_node_pre_ids.sort()
self.assertEqual(expected_union_node_pre_ids, union_node_pre_ids)
def test_project(self):
ds = self.env.from_collection([[1, 2, 3, 4], [5, 6, 7, 8]],
type_info=Types.TUPLE(
[Types.INT(), Types.INT(), Types.INT(), Types.INT()]))
ds.project(1, 3).map(lambda x: (x[0], x[1] + 1)).add_sink(self.test_sink)
exec_plan = eval(self.env.get_execution_plan())
self.assertEqual(exec_plan['nodes'][1]['type'], 'Projection')
def test_broadcast(self):
ds_1 = self.env.from_collection([1, 2, 3])
ds_1.broadcast().map(lambda x: x + 1).set_parallelism(3).add_sink(self.test_sink)
exec_plan = eval(self.env.get_execution_plan())
broadcast_node = exec_plan['nodes'][1]
pre_ship_strategy = broadcast_node['predecessors'][0]['ship_strategy']
self.assertEqual(pre_ship_strategy, 'BROADCAST')
def test_rebalance(self):
ds_1 = self.env.from_collection([1, 2, 3])
ds_1.rebalance().map(lambda x: x + 1).set_parallelism(3).add_sink(self.test_sink)
exec_plan = eval(self.env.get_execution_plan())
rebalance_node = exec_plan['nodes'][1]
pre_ship_strategy = rebalance_node['predecessors'][0]['ship_strategy']
self.assertEqual(pre_ship_strategy, 'REBALANCE')
def test_rescale(self):
ds_1 = self.env.from_collection([1, 2, 3])
ds_1.rescale().map(lambda x: x + 1).set_parallelism(3).add_sink(self.test_sink)
exec_plan = eval(self.env.get_execution_plan())
rescale_node = exec_plan['nodes'][1]
pre_ship_strategy = rescale_node['predecessors'][0]['ship_strategy']
self.assertEqual(pre_ship_strategy, 'RESCALE')
def test_shuffle(self):
ds_1 = self.env.from_collection([1, 2, 3])
ds_1.shuffle().map(lambda x: x + 1).set_parallelism(3).add_sink(self.test_sink)
exec_plan = eval(self.env.get_execution_plan())
shuffle_node = exec_plan['nodes'][1]
pre_ship_strategy = shuffle_node['predecessors'][0]['ship_strategy']
self.assertEqual(pre_ship_strategy, 'SHUFFLE')
def test_keyed_stream_partitioning(self):
ds = self.env.from_collection([('ab', 1), ('bdc', 2), ('cfgs', 3), ('deeefg', 4)])
keyed_stream = ds.key_by(lambda x: x[1])
with self.assertRaises(Exception):
keyed_stream.shuffle()
with self.assertRaises(Exception):
keyed_stream.rebalance()
with self.assertRaises(Exception):
keyed_stream.rescale()
with self.assertRaises(Exception):
keyed_stream.broadcast()
with self.assertRaises(Exception):
keyed_stream.forward()
def test_slot_sharing_group(self):
source_operator_name = 'collection source'
map_operator_name = 'map_operator'
slot_sharing_group_1 = 'slot_sharing_group_1'
slot_sharing_group_2 = 'slot_sharing_group_2'
ds_1 = self.env.from_collection([1, 2, 3]).name(source_operator_name)
ds_1.slot_sharing_group(SlotSharingGroup.builder(slot_sharing_group_1).build()) \
.map(lambda x: x + 1).set_parallelism(3) \
.name(map_operator_name).slot_sharing_group(slot_sharing_group_2) \
.add_sink(self.test_sink)
j_generated_stream_graph = self.env._j_stream_execution_environment \
.getStreamGraph(True)
j_stream_nodes = list(j_generated_stream_graph.getStreamNodes().toArray())
for j_stream_node in j_stream_nodes:
if j_stream_node.getOperatorName() == source_operator_name:
self.assertEqual(j_stream_node.getSlotSharingGroup(), slot_sharing_group_1)
elif j_stream_node.getOperatorName() == map_operator_name:
self.assertEqual(j_stream_node.getSlotSharingGroup(), slot_sharing_group_2)
def test_chaining_strategy(self):
chained_operator_name_0 = "map_operator_0"
chained_operator_name_1 = "map_operator_1"
chained_operator_name_2 = "map_operator_2"
ds = self.env.from_collection([1, 2, 3])
ds.map(lambda x: x).set_parallelism(2).name(chained_operator_name_0)\
.map(lambda x: x).set_parallelism(2).name(chained_operator_name_1)\
.map(lambda x: x).set_parallelism(2).name(chained_operator_name_2)\
.add_sink(self.test_sink)
def assert_chainable(j_stream_graph, expected_upstream_chainable,
expected_downstream_chainable):
j_stream_nodes = list(j_stream_graph.getStreamNodes().toArray())
for j_stream_node in j_stream_nodes:
if j_stream_node.getOperatorName() == chained_operator_name_1:
JStreamingJobGraphGenerator = get_gateway().jvm \
.org.apache.flink.streaming.api.graph.StreamingJobGraphGenerator
j_in_stream_edge = j_stream_node.getInEdges().get(0)
upstream_chainable = JStreamingJobGraphGenerator.isChainable(j_in_stream_edge,
j_stream_graph)
self.assertEqual(expected_upstream_chainable, upstream_chainable)
j_out_stream_edge = j_stream_node.getOutEdges().get(0)
downstream_chainable = JStreamingJobGraphGenerator.isChainable(
j_out_stream_edge, j_stream_graph)
self.assertEqual(expected_downstream_chainable, downstream_chainable)
# The map_operator_1 has the same parallelism with map_operator_0 and map_operator_2, and
# ship_strategy for map_operator_0 and map_operator_1 is FORWARD, so the map_operator_1
# can be chained with map_operator_0 and map_operator_2.
j_generated_stream_graph = self.env._j_stream_execution_environment\
.getStreamGraph(True)
assert_chainable(j_generated_stream_graph, True, True)
ds = self.env.from_collection([1, 2, 3])
# Start a new chain for map_operator_1
ds.map(lambda x: x).set_parallelism(2).name(chained_operator_name_0) \
.map(lambda x: x).set_parallelism(2).name(chained_operator_name_1).start_new_chain() \
.map(lambda x: x).set_parallelism(2).name(chained_operator_name_2) \
.add_sink(self.test_sink)
j_generated_stream_graph = self.env._j_stream_execution_environment \
.getStreamGraph(True)
# We start a new chain for map operator, therefore, it cannot be chained with upstream
# operator, but can be chained with downstream operator.
assert_chainable(j_generated_stream_graph, False, True)
ds = self.env.from_collection([1, 2, 3])
# Disable chaining for map_operator_1
ds.map(lambda x: x).set_parallelism(2).name(chained_operator_name_0) \
.map(lambda x: x).set_parallelism(2).name(chained_operator_name_1).disable_chaining() \
.map(lambda x: x).set_parallelism(2).name(chained_operator_name_2) \
.add_sink(self.test_sink)
j_generated_stream_graph = self.env._j_stream_execution_environment \
.getStreamGraph(True)
# We disable chaining for map_operator_1, therefore, it cannot be chained with
# upstream and downstream operators.
assert_chainable(j_generated_stream_graph, False, False)
def test_execute_and_collect(self):
test_data = ['pyflink', 'datastream', 'execute', 'collect']
ds = self.env.from_collection(test_data)
# test collect with limit
expected = test_data[:3]
actual = []
for result in ds.execute_and_collect(limit=3):
actual.append(result)
self.assertEqual(expected, actual)
# test collect KeyedStream
test_data = [('pyflink', 1), ('datastream', 2), ('pyflink', 1), ('collect', 2)]
expected = [Row(f0='pyflink', f1=('pyflink', 1)),
Row(f0='datastream', f1=('datastream', 2)),
Row(f0='pyflink', f1=('pyflink', 1)),
Row(f0='collect', f1=('collect', 2))]
ds = self.env.from_collection(collection=test_data,
type_info=Types.TUPLE([Types.STRING(), Types.INT()]))
with ds.key_by(lambda i: i[0], Types.STRING()).execute_and_collect() as results:
actual = []
for result in results:
actual.append(result)
self.assertEqual(expected, actual)
# test all kinds of data types
test_data = [(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink',
datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0, microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000),
[1, 2, 3],
[['pyflink', 'datastream'], ['execute', 'collect']],
decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.0599999999999'
'9999899999999999')),
(2, None, 2, True, 23878, 652516352, 9.87, 2.98936,
bytearray(b'flink'), 'pyflink',
datetime.date(2015, 10, 14),
datetime.time(hour=11, minute=2, second=2, microsecond=234500),
datetime.datetime(2020, 4, 15, 8, 2, 6, 235000),
[2, 4, 6],
[['pyflink', 'datastream'], ['execute', 'collect']],
decimal.Decimal('2000000000000000000.74'),
decimal.Decimal('2000000000000000000.061111111111111'
'11111111111111'))]
expected = test_data
ds = self.env.from_collection(test_data)
with ds.execute_and_collect() as results:
actual = [result for result in results]
self.assert_equals_sorted(expected, actual)
# test primitive array
test_data = [[1, 2, 3], [4, 5]]
expected = test_data
ds = self.env.from_collection(test_data, type_info=Types.PRIMITIVE_ARRAY(Types.INT()))
with ds.execute_and_collect() as results:
actual = [r for r in results]
self.assert_equals_sorted(expected, actual)
test_data = [
(["test", "test"], [0.0, 0.0]),
([None, ], [0.0, 0.0])
]
# test object array
ds = self.env.from_collection(
test_data,
type_info=Types.TUPLE(
[Types.OBJECT_ARRAY(Types.STRING()), Types.OBJECT_ARRAY(Types.DOUBLE())]
)
)
expected = test_data
with ds.execute_and_collect() as results:
actual = [result for result in results]
self.assert_equals_sorted(expected, actual)
def test_function_with_error(self):
ds = self.env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1), ('e', 1)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
keyed_stream = ds.key_by(MyKeySelector(), key_type=Types.INT())
def flat_map_func(x):
raise ValueError('flat_map_func error')
yield x
from py4j.protocol import Py4JJavaError
import pytest
with pytest.raises(Py4JJavaError, match="flat_map_func error"):
keyed_stream.flat_map(flat_map_func).print()
self.env.execute("test_process_function_with_error")
def test_data_with_custom_class(self):
class Data(object):
def __init__(self, name, num):
self.name = name
self.num = num
ds = self.env.from_collection([('a', 0), ('b', 1), ('c', 2)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds.map(lambda a: Data(a[0], a[1])) \
.flat_map(lambda data: [data.name for _ in range(data.num)]) \
.add_sink(self.test_sink)
self.env.execute("test_data_with_custom_class")
results = self.test_sink.get_results(True)
expected = ['c', 'c', 'b']
self.assert_equals_sorted(expected, results)
class MyKeySelector(KeySelector):
def get_key(self, value):
return value[1]
class MyRichCoFlatMapFunction(CoFlatMapFunction):
def __init__(self):
self.map_state = None
def open(self, runtime_context: RuntimeContext):
self.map_state = runtime_context.get_map_state(
MapStateDescriptor("map", Types.STRING(), Types.BOOLEAN()))
def flat_map1(self, value):
yield str(value[0] + 1)
yield str(value[0] + 1)
def flat_map2(self, value):
if value[0] not in self.map_state:
self.map_state[value[0]] = True
yield value[0]
class MyKeyedCoProcessFunction(KeyedCoProcessFunction):
def __init__(self):
self.count_state = None
self.timer_registered = False
def open(self, runtime_context: RuntimeContext):
self.timer_registered = False
self.count_state = runtime_context.get_state(ValueStateDescriptor("count", Types.INT()))
def process_element1(self, value, ctx: 'KeyedCoProcessFunction.Context'):
if not self.timer_registered:
ctx.timer_service().register_event_time_timer(3)
self.timer_registered = True
count = self.count_state.value()
if count is None:
count = 1
else:
count += 1
self.count_state.update(count)
return [Row(value[0], count)]
def process_element2(self, value, ctx: 'KeyedCoProcessFunction.Context'):
count = self.count_state.value()
if count is None:
count = 1
else:
count += 1
self.count_state.update(count)
return [Row(value[0], count)]
def on_timer(self, timestamp: int, ctx: 'KeyedCoProcessFunction.OnTimerContext'):
return [Row("on_timer", timestamp)]
class MyReduceFunction(ReduceFunction):
def __init__(self):
self.state = None
def open(self, runtime_context: RuntimeContext):
self.state = runtime_context.get_state(
ValueStateDescriptor("test_state", Types.INT()))
def reduce(self, value1, value2):
state_value = self.state.value()
if state_value is None:
state_value = 2
else:
state_value += 1
result_value = Row(value1[0] + value2[0], value1[1])
if result_value[0] == 'ab':
assert state_value == 2
if result_value[0] == 'cde':
assert state_value == 3
self.state.update(state_value)
return result_value
class SecondColumnTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[1])
| 79,220 | 44.450947 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_util.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import pickle
from pyflink.datastream.functions import SinkFunction
from pyflink.java_gateway import get_gateway
class DataStreamTestSinkFunction(SinkFunction):
"""
A util class to collect test DataStream transformation results.
"""
def __init__(self):
self.j_data_stream_collect_sink = get_gateway().jvm \
.org.apache.flink.python.util.DataStreamTestCollectSink()
super(DataStreamTestSinkFunction, self).__init__(sink_func=self.j_data_stream_collect_sink)
def get_results(self, is_python_object: bool = False, stringify: bool = True):
j_results = self.get_java_function().collectAndClear(is_python_object)
results = list(j_results)
if not is_python_object:
return results
else:
str_results = []
for result in results:
pickled_result = pickle.loads(result)
if stringify:
str_results.append(str(pickled_result))
else:
str_results.append(pickled_result)
return str_results
def clear(self):
if self.j_data_stream_collect_sink is None:
return
self.j_data_stream_collect_sink.clear()
| 2,192 | 39.611111 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_slot_sharing_group.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.slot_sharing_group import MemorySize, SlotSharingGroup
from pyflink.testing.test_case_utils import PyFlinkTestCase
class SlotSharingGroupTests(PyFlinkTestCase):
def test_build_slot_sharing_group_with_specific_resource(self):
name = 'slot_sharing_group'
heap_memory = MemorySize.of_mebi_bytes(100)
off_heap_memory = MemorySize.of_mebi_bytes(200)
managed_memory = MemorySize.of_mebi_bytes(300)
slot_sharing_group = SlotSharingGroup.builder(name) \
.set_cpu_cores(1.0) \
.set_task_heap_memory(heap_memory) \
.set_task_off_heap_memory(off_heap_memory) \
.set_managed_memory(managed_memory) \
.set_external_resource('gpu', 1.0) \
.build()
self.assertEqual(slot_sharing_group.get_name(), name)
self.assertEqual(slot_sharing_group.get_cpu_cores(), 1.0)
self.assertEqual(slot_sharing_group.get_task_heap_memory(), heap_memory)
self.assertEqual(slot_sharing_group.get_task_off_heap_memory(), off_heap_memory)
self.assertEqual(slot_sharing_group.get_managed_memory(), managed_memory)
self.assertEqual(slot_sharing_group.get_external_resources(), {'gpu': 1.0})
def test_build_slot_sharing_group_with_unknown_resource(self):
name = 'slot_sharing_group'
slot_sharing_group = SlotSharingGroup.builder(name).build()
self.assertEqual(slot_sharing_group.get_name(), name)
self.assertIsNone(slot_sharing_group.get_cpu_cores())
self.assertIsNone(slot_sharing_group.get_task_heap_memory())
self.assertIsNone(slot_sharing_group.get_task_off_heap_memory())
self.assertIsNone(slot_sharing_group.get_managed_memory())
self.assertEqual(slot_sharing_group.get_external_resources(), {})
def test_build_slot_sharing_group_with_illegal_config(self):
with self.assertRaises(Exception):
SlotSharingGroup.builder("slot_sharing_group") \
.set_cpu_cores(1.0) \
.set_task_heap_memory(MemorySize(bytes_size=0)) \
.set_task_off_heap_memory_mb(10) \
.build()
def test_build_slot_sharing_group_without_all_required_config(self):
with self.assertRaises(Exception):
SlotSharingGroup.builder("slot_sharing_group") \
.set_cpu_cores(1.0) \
.set_task_off_heap_memory_mb(10) \
.build()
| 3,420 | 50.059701 | 88 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_stream_execution_environment.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import glob
import os
import shutil
import tempfile
import time
import uuid
from pyflink.common import Configuration, ExecutionConfig, RestartStrategies
from pyflink.common.typeinfo import Types
from pyflink.datastream import (StreamExecutionEnvironment, CheckpointConfig,
CheckpointingMode, MemoryStateBackend, TimeCharacteristic,
SlotSharingGroup)
from pyflink.datastream.connectors.kafka import FlinkKafkaConsumer
from pyflink.datastream.execution_mode import RuntimeExecutionMode
from pyflink.datastream.formats.json import JsonRowDeserializationSchema
from pyflink.datastream.functions import SourceFunction
from pyflink.datastream.slot_sharing_group import MemorySize
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.find_flink_home import _find_flink_source_root
from pyflink.java_gateway import get_gateway
from pyflink.table import DataTypes, StreamTableEnvironment, EnvironmentSettings
from pyflink.testing.test_case_utils import PyFlinkTestCase, exec_insert_table
from pyflink.util.java_utils import get_j_env_configuration
class StreamExecutionEnvironmentTests(PyFlinkTestCase):
def setUp(self):
os.environ['_python_worker_execution_mode'] = "loopback"
self.env = StreamExecutionEnvironment.get_execution_environment()
os.environ['_python_worker_execution_mode'] = "process"
self.env.set_parallelism(2)
self.test_sink = DataStreamTestSinkFunction()
def test_get_config(self):
execution_config = self.env.get_config()
self.assertIsInstance(execution_config, ExecutionConfig)
def test_get_set_parallelism(self):
self.env.set_parallelism(10)
parallelism = self.env.get_parallelism()
self.assertEqual(parallelism, 10)
def test_get_set_buffer_timeout(self):
self.env.set_buffer_timeout(12000)
timeout = self.env.get_buffer_timeout()
self.assertEqual(timeout, 12000)
def test_get_set_default_local_parallelism(self):
self.env.set_default_local_parallelism(8)
parallelism = self.env.get_default_local_parallelism()
self.assertEqual(parallelism, 8)
def test_set_get_restart_strategy(self):
self.env.set_restart_strategy(RestartStrategies.no_restart())
restart_strategy = self.env.get_restart_strategy()
self.assertEqual(restart_strategy, RestartStrategies.no_restart())
def test_add_default_kryo_serializer(self):
self.env.add_default_kryo_serializer(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
"org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
class_dict = self.env.get_config().get_default_kryo_serializer_classes()
self.assertEqual(class_dict,
{'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
'org.apache.flink.runtime.state'
'.StateBackendTestBase$CustomKryoTestSerializer'})
def test_register_type_with_kryo_serializer(self):
self.env.register_type_with_kryo_serializer(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
"org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
class_dict = self.env.get_config().get_registered_types_with_kryo_serializer_classes()
self.assertEqual(class_dict,
{'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
'org.apache.flink.runtime.state'
'.StateBackendTestBase$CustomKryoTestSerializer'})
def test_register_type(self):
self.env.register_type("org.apache.flink.runtime.state.StateBackendTestBase$TestPojo")
type_list = self.env.get_config().get_registered_pojo_types()
self.assertEqual(type_list,
['org.apache.flink.runtime.state.StateBackendTestBase$TestPojo'])
def test_get_set_max_parallelism(self):
self.env.set_max_parallelism(12)
parallelism = self.env.get_max_parallelism()
self.assertEqual(parallelism, 12)
def test_set_runtime_mode(self):
self.env.set_runtime_mode(RuntimeExecutionMode.BATCH)
config = get_j_env_configuration(self.env._j_stream_execution_environment)
runtime_mode = config.getValue(
get_gateway().jvm.org.apache.flink.configuration.ExecutionOptions.RUNTIME_MODE)
self.assertEqual(runtime_mode, "BATCH")
def test_operation_chaining(self):
self.assertTrue(self.env.is_chaining_enabled())
self.env.disable_operator_chaining()
self.assertFalse(self.env.is_chaining_enabled())
def test_get_checkpoint_config(self):
checkpoint_config = self.env.get_checkpoint_config()
self.assertIsInstance(checkpoint_config, CheckpointConfig)
def test_get_set_checkpoint_interval(self):
self.env.enable_checkpointing(30000)
interval = self.env.get_checkpoint_interval()
self.assertEqual(interval, 30000)
def test_get_set_checkpointing_mode(self):
mode = self.env.get_checkpointing_mode()
self.assertEqual(mode, CheckpointingMode.EXACTLY_ONCE)
self.env.enable_checkpointing(30000, CheckpointingMode.AT_LEAST_ONCE)
mode = self.env.get_checkpointing_mode()
self.assertEqual(mode, CheckpointingMode.AT_LEAST_ONCE)
def test_get_state_backend(self):
state_backend = self.env.get_state_backend()
self.assertIsNone(state_backend)
def test_set_state_backend(self):
input_backend = MemoryStateBackend()
self.env.set_state_backend(input_backend)
output_backend = self.env.get_state_backend()
self.assertEqual(output_backend._j_memory_state_backend,
input_backend._j_memory_state_backend)
def test_is_changelog_state_backend_enabled(self):
self.assertIsNone(self.env.is_changelog_state_backend_enabled())
def test_enable_changelog_state_backend(self):
self.env.enable_changelog_state_backend(True)
self.assertTrue(self.env.is_changelog_state_backend_enabled())
self.env.enable_changelog_state_backend(False)
self.assertFalse(self.env.is_changelog_state_backend_enabled())
def test_get_set_stream_time_characteristic(self):
default_time_characteristic = self.env.get_stream_time_characteristic()
self.assertEqual(default_time_characteristic, TimeCharacteristic.EventTime)
self.env.set_stream_time_characteristic(TimeCharacteristic.ProcessingTime)
time_characteristic = self.env.get_stream_time_characteristic()
self.assertEqual(time_characteristic, TimeCharacteristic.ProcessingTime)
def test_configure(self):
configuration = Configuration()
configuration.set_string('pipeline.operator-chaining', 'false')
configuration.set_string('pipeline.time-characteristic', 'IngestionTime')
configuration.set_string('execution.buffer-timeout', '1 min')
configuration.set_string('execution.checkpointing.timeout', '12000')
configuration.set_string('state.backend', 'jobmanager')
self.env.configure(configuration)
self.assertEqual(self.env.is_chaining_enabled(), False)
self.assertEqual(self.env.get_stream_time_characteristic(),
TimeCharacteristic.IngestionTime)
self.assertEqual(self.env.get_buffer_timeout(), 60000)
self.assertEqual(self.env.get_checkpoint_config().get_checkpoint_timeout(), 12000)
self.assertTrue(isinstance(self.env.get_state_backend(), MemoryStateBackend))
def test_execute(self):
tmp_dir = tempfile.gettempdir()
t_env = StreamTableEnvironment.create(self.env)
t_env.execute_sql("""
CREATE TABLE Results (
a BIGINT,
b STRING,
c STRING
) WITH (
'connector' = 'filesystem',
'path'='{0}/{1}.csv',
'format' = 'csv'
)
""".format(tmp_dir, round(time.time())))
execution_result = exec_insert_table(
t_env.from_elements([(1, 'Hi', 'Hello')], ['a', 'b', 'c']),
'Results')
self.assertIsNotNone(execution_result.get_job_id())
self.assertIsNotNone(execution_result.get_net_runtime())
self.assertEqual(len(execution_result.get_all_accumulator_results()), 0)
self.assertIsNone(execution_result.get_accumulator_result('accumulator'))
self.assertIsNotNone(str(execution_result))
def test_from_collection_without_data_types(self):
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')])
ds.add_sink(self.test_sink)
self.env.execute("test from collection")
results = self.test_sink.get_results(True)
# user does not specify data types for input data, the collected result should be in
# in tuple format as inputs.
expected = ["(1, 'Hi', 'Hello')", "(2, 'Hello', 'Hi')"]
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_from_collection_with_data_types(self):
# verify from_collection for the collection with single object.
ds = self.env.from_collection(['Hi', 'Hello'], type_info=Types.STRING())
ds.add_sink(self.test_sink)
self.env.execute("test from collection with single object")
results = self.test_sink.get_results(False)
expected = ['Hello', 'Hi']
results.sort()
expected.sort()
self.assertEqual(expected, results)
# verify from_collection for the collection with multiple objects like tuple.
ds = self.env.from_collection([(1, None, 1, True, 32767, -2147483648, 1.23, 1.98932,
bytearray(b'flink'), 'pyflink', datetime.date(2014, 9, 13),
datetime.time(hour=12, minute=0, second=0,
microsecond=123000),
datetime.datetime(2018, 3, 11, 3, 0, 0, 123000), [1, 2, 3],
decimal.Decimal('1000000000000000000.05'),
decimal.Decimal('1000000000000000000.0599999999999'
'9999899999999999')),
(2, None, 2, True, 43878, 9147483648, 9.87, 2.98936,
bytearray(b'flink'), 'pyflink', datetime.date(2015, 10, 14),
datetime.time(hour=11, minute=2, second=2,
microsecond=234500),
datetime.datetime(2020, 4, 15, 8, 2, 6, 235000), [2, 4, 6],
decimal.Decimal('2000000000000000000.74'),
decimal.Decimal('2000000000000000000.061111111111111'
'11111111111111'))],
type_info=Types.ROW(
[Types.LONG(), Types.LONG(), Types.SHORT(),
Types.BOOLEAN(), Types.SHORT(), Types.INT(),
Types.FLOAT(), Types.DOUBLE(),
Types.PICKLED_BYTE_ARRAY(),
Types.STRING(), Types.SQL_DATE(), Types.SQL_TIME(),
Types.SQL_TIMESTAMP(),
Types.BASIC_ARRAY(Types.LONG()), Types.BIG_DEC(),
Types.BIG_DEC()]))
ds.add_sink(self.test_sink)
self.env.execute("test from collection with tuple object")
results = self.test_sink.get_results(False)
# if user specifies data types of input data, the collected result should be in row format.
expected = [
'+I[1, null, 1, true, 32767, -2147483648, 1.23, 1.98932, [102, 108, 105, 110, 107], '
'pyflink, 2014-09-13, 12:00:00, 2018-03-11 03:00:00.123, [1, 2, 3], '
'1000000000000000000.05, 1000000000000000000.05999999999999999899999999999]',
'+I[2, null, 2, true, -21658, 557549056, 9.87, 2.98936, [102, 108, 105, 110, 107], '
'pyflink, 2015-10-14, 11:02:02, 2020-04-15 08:02:06.235, [2, 4, 6], '
'2000000000000000000.74, 2000000000000000000.06111111111111111111111111111]']
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_add_custom_source(self):
custom_source = SourceFunction("org.apache.flink.python.util.MyCustomSourceFunction")
ds = self.env.add_source(custom_source, type_info=Types.ROW([Types.INT(), Types.STRING()]))
ds.add_sink(self.test_sink)
self.env.execute("test add custom source")
results = self.test_sink.get_results(False)
expected = [
'+I[3, Mike]',
'+I[1, Marry]',
'+I[4, Ted]',
'+I[5, Jack]',
'+I[0, Bob]',
'+I[2, Henry]']
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_read_text_file(self):
texts = ["Mike", "Marry", "Ted", "Jack", "Bob", "Henry"]
text_file_path = self.tempdir + '/text_file'
with open(text_file_path, 'a') as f:
for text in texts:
f.write(text)
f.write('\n')
ds = self.env.read_text_file(text_file_path)
ds.add_sink(self.test_sink)
self.env.execute("test read text file")
results = self.test_sink.get_results()
results.sort()
texts.sort()
self.assertEqual(texts, results)
def test_execute_async(self):
ds = self.env.from_collection([(1, 'Hi', 'Hello'), (2, 'Hello', 'Hi')],
type_info=Types.ROW(
[Types.INT(), Types.STRING(), Types.STRING()]))
ds.add_sink(self.test_sink)
job_client = self.env.execute_async("test execute async")
job_id = job_client.get_job_id()
self.assertIsNotNone(job_id)
execution_result = job_client.get_job_execution_result().result()
self.assertEqual(str(job_id), str(execution_result.get_job_id()))
def test_add_python_file(self):
import uuid
env = self.env
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_dep1.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
def plus_two_map(value):
from test_dep1 import add_two
return add_two(value)
get_j_env_configuration(env._j_stream_execution_environment).\
setString("taskmanager.numberOfTaskSlots", "10")
env.add_python_file(python_file_path)
ds = env.from_collection([1, 2, 3, 4, 5])
ds = ds.map(plus_two_map, Types.LONG()) \
.slot_sharing_group("data_stream") \
.map(lambda i: i, Types.LONG()) \
.slot_sharing_group("table")
python_file_path = os.path.join(python_file_dir, "test_dep2.py")
with open(python_file_path, 'w') as f:
f.write("def add_three(a):\n return a + 3")
def plus_three(value):
from test_dep2 import add_three
return add_three(value)
t_env = StreamTableEnvironment.create(
stream_execution_environment=env,
environment_settings=EnvironmentSettings.in_streaming_mode())
env.add_python_file(python_file_path)
from pyflink.table.udf import udf
from pyflink.table.expressions import col
add_three = udf(plus_three, result_type=DataTypes.BIGINT())
tab = t_env.from_data_stream(ds, col('a')) \
.select(add_three(col('a')))
t_env.to_append_stream(tab, Types.ROW([Types.LONG()])) \
.map(lambda i: i[0]) \
.add_sink(self.test_sink)
env.execute("test add_python_file")
result = self.test_sink.get_results(True)
expected = ['6', '7', '8', '9', '10']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_add_python_file_2(self):
import uuid
env = self.env
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_dep1.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
def plus_two_map(value):
from test_dep1 import add_two
return add_two(value)
get_j_env_configuration(env._j_stream_execution_environment).\
setString("taskmanager.numberOfTaskSlots", "10")
env.add_python_file(python_file_path)
ds = env.from_collection([1, 2, 3, 4, 5])
ds = ds.map(plus_two_map, Types.LONG()) \
.slot_sharing_group("data_stream") \
.map(lambda i: i, Types.LONG()) \
.slot_sharing_group("table")
python_file_path = os.path.join(python_file_dir, "test_dep2.py")
with open(python_file_path, 'w') as f:
f.write("def add_three(a):\n return a + 3")
def plus_three(value):
from test_dep2 import add_three
return add_three(value)
env.add_python_file(python_file_path)
t_env = StreamTableEnvironment.create(
stream_execution_environment=env,
environment_settings=EnvironmentSettings.in_streaming_mode())
from pyflink.table.udf import udf
from pyflink.table.expressions import col
add_three = udf(plus_three, result_type=DataTypes.BIGINT())
tab = t_env.from_data_stream(ds, col('a')) \
.select(add_three(col('a')))
result = [i[0] for i in tab.execute().collect()]
expected = [6, 7, 8, 9, 10]
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_set_requirements_without_cached_directory(self):
import uuid
requirements_txt_path = os.path.join(self.tempdir, str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("cloudpickle==2.2.0")
self.env.set_python_requirements(requirements_txt_path)
def check_requirements(i):
import cloudpickle # noqa # pylint: disable=unused-import
return i
ds = self.env.from_collection([1, 2, 3, 4, 5])
ds.map(check_requirements).add_sink(self.test_sink)
self.env.execute("test set requirements without cache dir")
result = self.test_sink.get_results(True)
expected = ['1', '2', '3', '4', '5']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_set_requirements_with_cached_directory(self):
import uuid
tmp_dir = self.tempdir
env = self.env
requirements_txt_path = os.path.join(tmp_dir, "requirements_txt_" + str(uuid.uuid4()))
with open(requirements_txt_path, 'w') as f:
f.write("python-package1==0.0.0")
requirements_dir_path = os.path.join(tmp_dir, "requirements_dir_" + str(uuid.uuid4()))
os.mkdir(requirements_dir_path)
package_file_name = "python-package1-0.0.0.tar.gz"
with open(os.path.join(requirements_dir_path, package_file_name), 'wb') as f:
import base64
# This base64 data is encoded from a python package file which includes a
# "python_package1" module. The module contains a "plus(a, b)" function.
# The base64 can be recomputed by following code:
# base64.b64encode(open("python-package1-0.0.0.tar.gz", "rb").read()).decode("utf-8")
f.write(base64.b64decode(
"H4sICNefrV0C/2Rpc3QvcHl0aG9uLXBhY2thZ2UxLTAuMC4wLnRhcgDtmVtv2jAYhnPtX2H1CrRCY+ckI"
"XEx7axuUA11u5imyICTRc1JiVnHfv1MKKWjYxwKEdPehws7xkmUfH5f+3PyqfqWpa1cjG5EKFnLbOvfhX"
"FQTI3nOPPSdavS5Pa8nGMwy3Esi3ke9wyTObbnGNQxamBSKlFQavzUryG8ldG6frpbEGx4yNmDLMp/hPy"
"P8b+6fNN613vdP1z8XdteG3+ug/17/F3Hcw1qIv5H54NUYiyUaH2SRRllaYeytkl6IpEdujI2yH2XapCQ"
"wSRJRDHt0OveZa//uUfeZonUvUO5bHo+0ZcoVo9bMhFRvGx9H41kWj447aUsR0WUq+pui8arWKggK5Jli"
"wGOo/95q79ovXi6/nfyf246Dof/n078fT9KI+X77Xx6BP83bX4Xf5NxT7dz7toO/L8OxjKgeTwpG+KcDp"
"sdQjWFVJMipYI+o0MCk4X/t2UYtqI0yPabCHb3f861XcD/Ty/+Y5nLdCzT0dSPo/SmbKsf6un+b7KV+Ls"
"W4/D/OoC9w/930P9eGwM75//csrD+Q/6P/P/k9D/oX3988Wqw1bS/tf6tR+s/m3EG/ddBqXO9XKf15C8p"
"P9k4HZBtBgzZaVW5vrfKcj+W32W82ygEB9D/Xu9+4/qfP9L/rBv0X1v87yONKRX61/qfzwqjIDzIPTbv/"
"7or3/88i0H/tfBFW7s/s/avRInQH06ieEy7tDrQeYHUdRN7wP+n/vf62LOH/pld7f9xz7a5Pfufedy0oP"
"86iJI8KxStAq6yLC4JWdbbVbWRikR2z1ZGytk5vauW3QdnBFE6XqwmykazCesAAAAAAAAAAAAAAAAAAAA"
"AAAAAAAAAAAAAAOBw/AJw5CHBAFAAAA=="))
env.set_python_requirements(requirements_txt_path, requirements_dir_path)
def add_one(i):
from python_package1 import plus
return plus(i, 1)
ds = env.from_collection([1, 2, 3, 4, 5])
ds.map(add_one).add_sink(self.test_sink)
env.execute("test set requirements with cachd dir")
result = self.test_sink.get_results(True)
expected = ['2', '3', '4', '5', '6']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_add_python_archive(self):
import uuid
import shutil
tmp_dir = self.tempdir
env = self.env
archive_dir_path = os.path.join(tmp_dir, "archive_" + str(uuid.uuid4()))
os.mkdir(archive_dir_path)
with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f:
f.write("2")
archive_file_path = \
shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path)
env.add_python_archive(archive_file_path, "data")
def add_from_file(i):
with open("data/data.txt", 'r') as f:
return i + int(f.read())
ds = env.from_collection([1, 2, 3, 4, 5])
ds.map(add_from_file).add_sink(self.test_sink)
env.execute("test set python archive")
result = self.test_sink.get_results(True)
expected = ['3', '4', '5', '6', '7']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_set_stream_env(self):
import sys
env = self.env
python_exec_link_path = sys.executable
env.set_python_executable(python_exec_link_path)
def check_python_exec(i):
import os
assert os.environ["python"] == python_exec_link_path
return i
ds = env.from_collection([1, 2, 3, 4, 5])
ds.map(check_python_exec).add_sink(self.test_sink)
env.execute("test set python executable")
result = self.test_sink.get_results(True)
expected = ['1', '2', '3', '4', '5']
result.sort()
expected.sort()
self.assertEqual(expected, result)
def test_add_jars(self):
# find kafka connector jars
flink_source_root = _find_flink_source_root()
jars_abs_path = flink_source_root + '/flink-connectors/flink-sql-connector-kafka'
specific_jars = glob.glob(jars_abs_path + '/target/flink*.jar')
specific_jars = ['file://' + specific_jar for specific_jar in specific_jars]
self.env.add_jars(*specific_jars)
source_topic = 'test_source_topic'
props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
type_info = Types.ROW([Types.INT(), Types.STRING()])
# Test for kafka consumer
deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(type_info=type_info).build()
# Will get a ClassNotFoundException if not add the kafka connector into the pipeline jars.
kafka_consumer = FlinkKafkaConsumer(source_topic, deserialization_schema, props)
self.env.add_source(kafka_consumer).print()
self.env.get_execution_plan()
def test_add_classpaths(self):
# find kafka connector jars
flink_source_root = _find_flink_source_root()
jars_abs_path = flink_source_root + '/flink-connectors/flink-sql-connector-kafka'
specific_jars = glob.glob(jars_abs_path + '/target/flink*.jar')
specific_jars = ['file://' + specific_jar for specific_jar in specific_jars]
self.env.add_classpaths(*specific_jars)
source_topic = 'test_source_topic'
props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
type_info = Types.ROW([Types.INT(), Types.STRING()])
# Test for kafka consumer
deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(type_info=type_info).build()
# It Will raise a ClassNotFoundException if the kafka connector is not added into the
# pipeline classpaths.
kafka_consumer = FlinkKafkaConsumer(source_topic, deserialization_schema, props)
self.env.add_source(kafka_consumer).print()
self.env.get_execution_plan()
def test_generate_stream_graph_with_dependencies(self):
python_file_dir = os.path.join(self.tempdir, "python_file_dir_" + str(uuid.uuid4()))
os.mkdir(python_file_dir)
python_file_path = os.path.join(python_file_dir, "test_stream_dependency_manage_lib.py")
with open(python_file_path, 'w') as f:
f.write("def add_two(a):\n return a + 2")
env = self.env
env.add_python_file(python_file_path)
def plus_two_map(value):
from test_stream_dependency_manage_lib import add_two
return value[0], add_two(value[1])
def add_from_file(i):
with open("data/data.txt", 'r') as f:
return i[0], i[1] + int(f.read())
from_collection_source = env.from_collection([('a', 0), ('b', 0), ('c', 1), ('d', 1),
('e', 2)],
type_info=Types.ROW([Types.STRING(),
Types.INT()]))
from_collection_source.name("From Collection")
keyed_stream = from_collection_source.key_by(lambda x: x[1], key_type=Types.INT())
plus_two_map_stream = keyed_stream.map(plus_two_map).name("Plus Two Map").set_parallelism(3)
add_from_file_map = plus_two_map_stream.map(add_from_file).name("Add From File Map")
test_stream_sink = add_from_file_map.add_sink(self.test_sink).name("Test Sink")
test_stream_sink.set_parallelism(4)
archive_dir_path = os.path.join(self.tempdir, "archive_" + str(uuid.uuid4()))
os.mkdir(archive_dir_path)
with open(os.path.join(archive_dir_path, "data.txt"), 'w') as f:
f.write("3")
archive_file_path = \
shutil.make_archive(os.path.dirname(archive_dir_path), 'zip', archive_dir_path)
env.add_python_archive(archive_file_path, "data")
nodes = eval(env.get_execution_plan())['nodes']
# The StreamGraph should be as bellow:
# Source: From Collection -> _stream_key_by_map_operator ->
# Plus Two Map -> Add From File Map -> Sink: Test Sink.
# Source: From Collection and _stream_key_by_map_operator should have same parallelism.
self.assertEqual(nodes[0]['parallelism'], nodes[1]['parallelism'])
# The parallelism of Plus Two Map should be 3
self.assertEqual(nodes[2]['parallelism'], 3)
# The ship_strategy for Source: From Collection and _stream_key_by_map_operator should be
# FORWARD
self.assertEqual(nodes[1]['predecessors'][0]['ship_strategy'], "FORWARD")
# The ship_strategy for _keyed_stream_values_operator and Plus Two Map should be
# HASH
self.assertEqual(nodes[2]['predecessors'][0]['ship_strategy'], "HASH")
# The parallelism of Sink: Test Sink should be 4
self.assertEqual(nodes[4]['parallelism'], 4)
python_dependency_config = dict(
get_gateway().jvm.org.apache.flink.python.util.PythonDependencyUtils.
configurePythonDependencies(
env._j_stream_execution_environment.getCachedFiles(),
env._j_stream_execution_environment.getConfiguration()).toMap())
# Make sure that user specified files and archives are correctly added.
self.assertIsNotNone(python_dependency_config['python.internal.files-key-map'])
self.assertIsNotNone(python_dependency_config['python.internal.archives-key-map'])
def test_register_slot_sharing_group(self):
slot_sharing_group_1 = SlotSharingGroup.builder('slot_sharing_group_1') \
.set_cpu_cores(1.0).set_task_heap_memory_mb(100).build()
slot_sharing_group_2 = SlotSharingGroup.builder('slot_sharing_group_2') \
.set_cpu_cores(2.0).set_task_heap_memory_mb(200).build()
slot_sharing_group_3 = SlotSharingGroup.builder('slot_sharing_group_3').build()
self.env.register_slot_sharing_group(slot_sharing_group_1)
self.env.register_slot_sharing_group(slot_sharing_group_2)
self.env.register_slot_sharing_group(slot_sharing_group_3)
ds = self.env.from_collection([1, 2, 3]).slot_sharing_group(
'slot_sharing_group_1')
ds.map(lambda x: x + 1).set_parallelism(3) \
.slot_sharing_group('slot_sharing_group_2') \
.add_sink(self.test_sink)
j_generated_stream_graph = self.env._j_stream_execution_environment \
.getStreamGraph(True)
j_resource_profile_1 = j_generated_stream_graph.getSlotSharingGroupResource(
'slot_sharing_group_1').get()
j_resource_profile_2 = j_generated_stream_graph.getSlotSharingGroupResource(
'slot_sharing_group_2').get()
j_resource_profile_3 = j_generated_stream_graph.getSlotSharingGroupResource(
'slot_sharing_group_3')
self.assertEqual(j_resource_profile_1.getCpuCores().getValue(), 1.0)
self.assertEqual(MemorySize(j_memory_size=j_resource_profile_1.getTaskHeapMemory()),
MemorySize.of_mebi_bytes(100))
self.assertEqual(j_resource_profile_2.getCpuCores().getValue(), 2.0)
self.assertEqual(MemorySize(j_memory_size=j_resource_profile_2.getTaskHeapMemory()),
MemorySize.of_mebi_bytes(200))
self.assertFalse(j_resource_profile_3.isPresent())
def test_register_cached_file(self):
texts = ['machen', 'zeit', 'heerscharen', 'keiner', 'meine']
text_path = self.tempdir + '/text_file'
with open(text_path, 'a') as f:
for text in texts:
f.write(text)
f.write('\n')
self.env.register_cached_file(text_path, 'cache_test')
cached_files = self.env._j_stream_execution_environment.getCachedFiles()
self.assertEqual(cached_files.size(), 1)
self.assertEqual(cached_files[0].getField(0), 'cache_test')
def tearDown(self) -> None:
self.test_sink.clear()
| 32,884 | 44.800836 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_state_backend.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.state_backend import (_from_j_state_backend, CustomStateBackend,
MemoryStateBackend, FsStateBackend,
RocksDBStateBackend, PredefinedOptions,
EmbeddedRocksDBStateBackend)
from pyflink.java_gateway import get_gateway
from pyflink.pyflink_gateway_server import on_windows
from pyflink.testing.test_case_utils import PyFlinkTestCase
from pyflink.util.java_utils import load_java_class
class MemoryStateBackendTests(PyFlinkTestCase):
def test_constant(self):
gateway = get_gateway()
JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory \
.MemoryStateBackend
self.assertEqual(MemoryStateBackend.DEFAULT_MAX_STATE_SIZE,
JMemoryStateBackend.DEFAULT_MAX_STATE_SIZE)
def test_create_memory_state_backend(self):
self.assertIsNotNone(MemoryStateBackend("file://var/checkpoints/"))
self.assertIsNotNone(MemoryStateBackend("file://var/checkpoints/",
"file://var/savepoints/"))
self.assertIsNotNone(MemoryStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 10000000))
self.assertIsNotNone(MemoryStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 10000000, True))
self.assertIsNotNone(MemoryStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 10000000, False))
def test_get_max_state_size(self):
state_backend = MemoryStateBackend()
self.assertEqual(state_backend.get_max_state_size(),
MemoryStateBackend.DEFAULT_MAX_STATE_SIZE)
state_backend = MemoryStateBackend(max_state_size=50000)
self.assertEqual(state_backend.get_max_state_size(), 50000)
class FsStateBackendTests(PyFlinkTestCase):
def test_create_fs_state_backend(self):
self.assertIsNotNone(FsStateBackend("file://var/checkpoints/"))
self.assertIsNotNone(FsStateBackend("file://var/checkpoints/", "file://var/savepoints/"))
self.assertIsNotNone(FsStateBackend("file://var/checkpoints/",
"file://var/savepoints/", 2048))
self.assertIsNotNone(FsStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 2048, 2048, True))
self.assertIsNotNone(FsStateBackend(
"file://var/checkpoints/", "file://var/savepoints/", 2048, 4096))
def test_get_min_file_size_threshold(self):
state_backend = FsStateBackend("file://var/checkpoints/")
self.assertEqual(state_backend.get_min_file_size_threshold(), 20480)
state_backend = FsStateBackend("file://var/checkpoints/", file_state_size_threshold=2048)
self.assertEqual(state_backend.get_min_file_size_threshold(), 2048)
def test_get_checkpoint_path(self):
state_backend = FsStateBackend("file://var/checkpoints/")
self.assertEqual(state_backend.get_checkpoint_path(), "file://var/checkpoints")
class EmbeddedRocksDBStateBackendTests(PyFlinkTestCase):
def test_create_rocks_db_state_backend(self):
self.assertIsNotNone(EmbeddedRocksDBStateBackend())
self.assertIsNotNone(EmbeddedRocksDBStateBackend(True))
self.assertIsNotNone(EmbeddedRocksDBStateBackend(False))
def test_get_set_db_storage_paths(self):
if on_windows():
storage_path = ["file:/C:/var/db_storage_dir1/",
"file:/C:/var/db_storage_dir2/",
"file:/C:/var/db_storage_dir3/"]
expected = ["C:\\var\\db_storage_dir1",
"C:\\var\\db_storage_dir2",
"C:\\var\\db_storage_dir3"]
else:
storage_path = ["file://var/db_storage_dir1/",
"file://var/db_storage_dir2/",
"file://var/db_storage_dir3/"]
expected = ["/db_storage_dir1",
"/db_storage_dir2",
"/db_storage_dir3"]
state_backend = EmbeddedRocksDBStateBackend()
state_backend.set_db_storage_paths(*storage_path)
self.assertEqual(state_backend.get_db_storage_paths(), expected)
def test_get_set_predefined_options(self):
state_backend = EmbeddedRocksDBStateBackend()
self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.SPINNING_DISK_OPTIMIZED)
state_backend.set_predefined_options(PredefinedOptions.FLASH_SSD_OPTIMIZED)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.FLASH_SSD_OPTIMIZED)
state_backend.set_predefined_options(PredefinedOptions.DEFAULT)
self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
def test_get_set_options(self):
state_backend = EmbeddedRocksDBStateBackend()
self.assertIsNone(state_backend.get_options())
state_backend.set_options(
"org.apache.flink.contrib.streaming.state."
"RocksDBStateBackendConfigTest$TestOptionsFactory")
self.assertEqual(state_backend.get_options(),
"org.apache.flink.contrib.streaming.state."
"RocksDBStateBackendConfigTest$TestOptionsFactory")
def test_get_set_number_of_transfer_threads(self):
state_backend = EmbeddedRocksDBStateBackend()
self.assertEqual(state_backend.get_number_of_transfer_threads(), 4)
state_backend.set_number_of_transfer_threads(8)
self.assertEqual(state_backend.get_number_of_transfer_threads(), 8)
class RocksDBStateBackendTests(PyFlinkTestCase):
def test_create_rocks_db_state_backend(self):
self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/"))
self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/", True))
self.assertIsNotNone(RocksDBStateBackend("file://var/checkpoints/", False))
self.assertIsNotNone(RocksDBStateBackend(
checkpoint_stream_backend=FsStateBackend("file://var/checkpoints/")))
def test_get_checkpoint_backend(self):
state_backend = RocksDBStateBackend("file://var/checkpoints/")
checkpoint_backend = state_backend.get_checkpoint_backend()
self.assertIsInstance(checkpoint_backend, FsStateBackend)
self.assertEqual(checkpoint_backend.get_checkpoint_path(), "file://var/checkpoints")
def test_get_set_db_storage_paths(self):
if on_windows():
checkpoints_path = "file:/C:/var/checkpoints/"
storage_path = ["file:/C:/var/db_storage_dir1/",
"file:/C:/var/db_storage_dir2/",
"file:/C:/var/db_storage_dir3/"]
expected = ["C:\\var\\db_storage_dir1",
"C:\\var\\db_storage_dir2",
"C:\\var\\db_storage_dir3"]
else:
checkpoints_path = "file://var/checkpoints/"
storage_path = ["file://var/db_storage_dir1/",
"file://var/db_storage_dir2/",
"file://var/db_storage_dir3/"]
expected = ["/db_storage_dir1",
"/db_storage_dir2",
"/db_storage_dir3"]
state_backend = RocksDBStateBackend(checkpoints_path)
state_backend.set_db_storage_paths(*storage_path)
self.assertEqual(state_backend.get_db_storage_paths(), expected)
def test_get_set_predefined_options(self):
state_backend = RocksDBStateBackend("file://var/checkpoints/")
self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.SPINNING_DISK_OPTIMIZED_HIGH_MEM)
state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.SPINNING_DISK_OPTIMIZED)
state_backend.set_predefined_options(PredefinedOptions.FLASH_SSD_OPTIMIZED)
self.assertEqual(state_backend.get_predefined_options(),
PredefinedOptions.FLASH_SSD_OPTIMIZED)
state_backend.set_predefined_options(PredefinedOptions.DEFAULT)
self.assertEqual(state_backend.get_predefined_options(), PredefinedOptions.DEFAULT)
def test_get_set_options(self):
state_backend = RocksDBStateBackend("file://var/checkpoints/")
self.assertIsNone(state_backend.get_options())
state_backend.set_options(
"org.apache.flink.contrib.streaming.state.DefaultConfigurableOptionsFactory")
self.assertEqual(state_backend.get_options(),
"org.apache.flink.contrib.streaming.state."
"DefaultConfigurableOptionsFactory")
def test_get_set_number_of_transfering_threads(self):
state_backend = RocksDBStateBackend("file://var/checkpoints/")
state_backend.set_number_of_transfering_threads(7)
self.assertEqual(state_backend.get_number_of_transfering_threads(), 7)
class CustomStateBackendTests(PyFlinkTestCase):
def test_create_custom_state_backend(self):
gateway = get_gateway()
JConfiguration = gateway.jvm.org.apache.flink.configuration.Configuration
j_config = JConfiguration()
j_factory = load_java_class("org.apache.flink.streaming.runtime.tasks."
"StreamTaskTest$TestMemoryStateBackendFactory").newInstance()
context_classloader = gateway.jvm.Thread.currentThread().getContextClassLoader()
state_backend = _from_j_state_backend(j_factory.createFromConfig(j_config,
context_classloader))
self.assertIsInstance(state_backend, CustomStateBackend)
| 11,642 | 39.996479 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_checkpoint_storage.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.checkpoint_storage import (JobManagerCheckpointStorage,
FileSystemCheckpointStorage)
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import PyFlinkTestCase
class JobManagerCheckpointStorageTests(PyFlinkTestCase):
def test_constant(self):
gateway = get_gateway()
JJobManagerCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage \
.JobManagerCheckpointStorage
self.assertEqual(JobManagerCheckpointStorage.DEFAULT_MAX_STATE_SIZE,
JJobManagerCheckpointStorage.DEFAULT_MAX_STATE_SIZE)
def test_create_jobmanager_checkpoint_storage(self):
self.assertIsNotNone(JobManagerCheckpointStorage())
self.assertIsNotNone(JobManagerCheckpointStorage("file://var/checkpoints/"))
self.assertIsNotNone(JobManagerCheckpointStorage(
"file://var/checkpoints/", 10000000))
def test_get_max_state_size(self):
checkpoint_storage = JobManagerCheckpointStorage()
self.assertEqual(checkpoint_storage.get_max_state_size(),
JobManagerCheckpointStorage.DEFAULT_MAX_STATE_SIZE)
checkpoint_storage = JobManagerCheckpointStorage(max_state_size=50000)
self.assertEqual(checkpoint_storage.get_max_state_size(), 50000)
class FileSystemCheckpointStorageTests(PyFlinkTestCase):
def test_create_fs_checkpoint_storage(self):
self.assertIsNotNone(FileSystemCheckpointStorage("file://var/checkpoints/"))
self.assertIsNotNone(FileSystemCheckpointStorage("file://var/checkpoints/", 2048))
self.assertIsNotNone(FileSystemCheckpointStorage(
"file://var/checkpoints/", 2048, 4096))
def test_get_min_file_size_threshold(self):
checkpoint_storage = FileSystemCheckpointStorage("file://var/checkpoints/")
self.assertEqual(checkpoint_storage.get_min_file_size_threshold(), 20480)
checkpoint_storage = FileSystemCheckpointStorage("file://var/checkpoints/",
file_state_size_threshold=2048)
self.assertEqual(checkpoint_storage.get_min_file_size_threshold(), 2048)
def test_get_checkpoint_path(self):
checkpoint_storage = FileSystemCheckpointStorage("file://var/checkpoints/")
self.assertEqual(checkpoint_storage.get_checkpoint_path(), "file://var/checkpoints")
| 3,437 | 40.421687 | 92 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/tests/test_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Iterable, Tuple, Dict
from pyflink.common import Configuration
from pyflink.common.time import Time
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import WatermarkStrategy, TimestampAssigner
from pyflink.datastream.data_stream import DataStream
from pyflink.datastream.functions import (ProcessWindowFunction, WindowFunction, AggregateFunction,
ProcessAllWindowFunction)
from pyflink.datastream.output_tag import OutputTag
from pyflink.datastream.window import (TumblingEventTimeWindows,
SlidingEventTimeWindows, EventTimeSessionWindows,
CountSlidingWindowAssigner, SessionWindowTimeGapExtractor,
CountWindow, PurgingTrigger, EventTimeTrigger, TimeWindow,
GlobalWindows, CountTrigger)
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase
from pyflink.util.java_utils import get_j_env_configuration
class WindowTests(object):
def setUp(self) -> None:
super(WindowTests, self).setUp()
self.test_sink = DataStreamTestSinkFunction()
def tearDown(self) -> None:
self.test_sink.clear()
def assert_equals_sorted(self, expected, actual):
expected.sort()
actual.sort()
self.assertEqual(expected, actual)
def test_event_time_tumbling_window(self):
data_stream = self.env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 5), ('hi', 8), ('hi', 9),
('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(TumblingEventTimeWindows.of(Time.milliseconds(5))) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.LONG(), Types.LONG(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_event_time_tumbling_window')
results = self.test_sink.get_results()
expected = ['(hi,0,5,4)', '(hi,5,10,3)', '(hi,15,20,1)']
self.assert_equals_sorted(expected, results)
def test_count_tumbling_window(self):
data_stream = self.env.from_collection([
(1, 'hi'), (2, 'hello'), (3, 'hi'), (4, 'hello'), (5, 'hi'), (6, 'hello'),
(6, 'hello')],
type_info=Types.TUPLE([Types.INT(), Types.STRING()])) # type: DataStream
data_stream.key_by(lambda x: x[1], key_type=Types.STRING()) \
.count_window(3) \
.apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_count_tumbling_window')
results = self.test_sink.get_results()
expected = ['(hi,9)', '(hello,12)']
self.assert_equals_sorted(expected, results)
def test_event_time_sliding_window(self):
data_stream = self.env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 5), ('hi', 8), ('hi', 9),
('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(SlidingEventTimeWindows.of(Time.milliseconds(5), Time.milliseconds(2))) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.LONG(), Types.LONG(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_event_time_sliding_window')
results = self.test_sink.get_results()
expected = ['(hi,-2,3,2)', '(hi,0,5,4)', '(hi,2,7,4)', '(hi,4,9,3)', '(hi,6,11,2)',
'(hi,8,13,2)', '(hi,12,17,1)', '(hi,14,19,1)']
self.assert_equals_sorted(expected, results)
def test_count_sliding_window(self):
data_stream = self.env.from_collection([
(1, 'hi'), (2, 'hello'), (3, 'hi'), (4, 'hello'), (5, 'hi'), (6, 'hello')],
type_info=Types.TUPLE([Types.INT(), Types.STRING()])) # type: DataStream
data_stream.key_by(lambda x: x[1], key_type=Types.STRING()) \
.window(CountSlidingWindowAssigner(2, 1)) \
.apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_count_sliding_window')
results = self.test_sink.get_results()
expected = ['(hello,6)', '(hi,8)', '(hi,4)', '(hello,10)']
self.assert_equals_sorted(expected, results)
def test_event_time_session_window(self):
data_stream = self.env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 8), ('hi', 9), ('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(5))) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.LONG(), Types.LONG(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_event_time_session_window')
results = self.test_sink.get_results()
expected = ['(hi,1,14,6)', '(hi,15,20,1)']
self.assert_equals_sorted(expected, results)
def test_event_time_dynamic_gap_session_window(self):
self.env.set_parallelism(1)
data_stream = self.env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 9), ('hi', 9), ('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_dynamic_gap(MySessionWindowTimeGapExtractor())) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.LONG(), Types.LONG(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_event_time_dynamic_gap_session_window')
results = self.test_sink.get_results()
expected = ['(hi,1,8,4)', '(hi,9,30,3)']
self.assert_equals_sorted(expected, results)
def test_window_reduce_passthrough(self):
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.reduce(lambda a, b: (b[0], a[1] + b[1]),
output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_window_reduce_passthrough')
results = self.test_sink.get_results()
expected = ['(a,3)', '(a,6)', '(a,15)', '(b,3)', '(b,17)']
self.assert_equals_sorted(expected, results)
def test_window_reduce_process(self):
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
class MyProcessFunction(ProcessWindowFunction):
def process(self, key, context: ProcessWindowFunction.Context,
elements: Iterable[Tuple[str, int]]) -> Iterable[str]:
yield "current window start at {}, reduce result {}".format(
context.window().start,
next(iter(elements)),
)
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.reduce(lambda a, b: (b[0], a[1] + b[1]),
window_function=MyProcessFunction(),
output_type=Types.STRING()) \
.add_sink(self.test_sink)
self.env.execute('test_window_reduce_process')
results = self.test_sink.get_results()
expected = ["current window start at 1, reduce result ('a', 3)",
"current window start at 15, reduce result ('a', 15)",
"current window start at 3, reduce result ('b', 3)",
"current window start at 6, reduce result ('a', 6)",
"current window start at 8, reduce result ('b', 17)"]
self.assert_equals_sorted(expected, results)
def test_window_aggregate_passthrough(self):
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
class MyAggregateFunction(AggregateFunction):
def create_accumulator(self) -> Tuple[str, Dict[int, int]]:
return '', {0: 0, 1: 0}
def add(self, value: Tuple[str, int], accumulator: Tuple[str, Dict[int, int]]
) -> Tuple[str, Dict[int, int]]:
number_map = accumulator[1]
number_map[value[1] % 2] += 1
return value[0], number_map
def get_result(self, accumulator: Tuple[str, Dict[int, int]]) -> Tuple[str, int]:
number_map = accumulator[1]
return accumulator[0], number_map[0] - number_map[1]
def merge(self, acc_a: Tuple[str, Dict[int, int]], acc_b: Tuple[str, Dict[int, int]]
) -> Tuple[str, Dict[int, int]]:
number_map_a = acc_a[1]
number_map_b = acc_b[1]
new_number_map = {
0: number_map_a[0] + number_map_b[0],
1: number_map_a[1] + number_map_b[1]
}
return acc_a[0], new_number_map
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.aggregate(MyAggregateFunction(),
output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_window_aggregate_passthrough')
results = self.test_sink.get_results()
expected = ['(a,-1)', '(a,0)', '(a,1)', '(b,-1)', '(b,0)']
self.assert_equals_sorted(expected, results)
def test_window_aggregate_accumulator_type(self):
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
class MyAggregateFunction(AggregateFunction):
def create_accumulator(self) -> Tuple[int, str]:
return 0, ''
def add(self, value: Tuple[str, int], accumulator: Tuple[int, str]) -> Tuple[int, str]:
return value[1] + accumulator[0], value[0]
def get_result(self, accumulator: Tuple[str, int]):
return accumulator[1], accumulator[0]
def merge(self, acc_a: Tuple[int, str], acc_b: Tuple[int, str]):
return acc_a[0] + acc_b[0], acc_a[1]
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.aggregate(MyAggregateFunction(),
accumulator_type=Types.TUPLE([Types.INT(), Types.STRING()]),
output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_window_aggregate_accumulator_type')
results = self.test_sink.get_results()
expected = ['(a,15)', '(a,3)', '(a,6)', '(b,17)', '(b,3)']
self.assert_equals_sorted(expected, results)
def test_window_aggregate_process(self):
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
class MyAggregateFunction(AggregateFunction):
def create_accumulator(self) -> Tuple[int, str]:
return 0, ''
def add(self, value: Tuple[str, int], accumulator: Tuple[int, str]) -> Tuple[int, str]:
return value[1] + accumulator[0], value[0]
def get_result(self, accumulator: Tuple[str, int]):
return accumulator[1], accumulator[0]
def merge(self, acc_a: Tuple[int, str], acc_b: Tuple[int, str]):
return acc_a[0] + acc_b[0], acc_a[1]
class MyProcessWindowFunction(ProcessWindowFunction):
def process(self, key: str, context: ProcessWindowFunction.Context,
elements: Iterable[Tuple[str, int]]) -> Iterable[str]:
agg_result = next(iter(elements))
yield "key {} timestamp sum {}".format(agg_result[0], agg_result[1])
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.aggregate(MyAggregateFunction(),
window_function=MyProcessWindowFunction(),
accumulator_type=Types.TUPLE([Types.INT(), Types.STRING()]),
output_type=Types.STRING()) \
.add_sink(self.test_sink)
self.env.execute('test_window_aggregate_process')
results = self.test_sink.get_results()
expected = ['key a timestamp sum 15',
'key a timestamp sum 3',
'key a timestamp sum 6',
'key b timestamp sum 17',
'key b timestamp sum 3']
self.assert_equals_sorted(expected, results)
def test_session_window_late_merge(self):
data_stream = self.env.from_collection([
('hi', 0), ('hi', 8), ('hi', 4)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(5))) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.LONG(), Types.LONG(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_session_window_late_merge')
results = self.test_sink.get_results()
expected = ['(hi,0,13,3)']
self.assert_equals_sorted(expected, results)
def test_event_time_session_window_with_purging_trigger(self):
data_stream = self.env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 8), ('hi', 9), ('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(3))) \
.trigger(PurgingTrigger.of(EventTimeTrigger.create())) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.LONG(), Types.LONG(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_event_time_session_window_with_purging_trigger')
results = self.test_sink.get_results()
expected = ['(hi,1,7,4)', '(hi,8,12,2)', '(hi,15,18,1)']
self.assert_equals_sorted(expected, results)
def test_global_window_with_purging_trigger(self):
self.env.set_parallelism(1)
data_stream = self.env.from_collection([
('hi', 1), ('hi', 1), ('hi', 1), ('hi', 1), ('hi', 1), ('hi', 1), ('hi', 1)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
class MyProcessFunction(ProcessWindowFunction):
def process(self, key, context: ProcessWindowFunction.Context,
elements: Iterable[Tuple[str, int]]) -> Iterable[tuple]:
return [(key, len([e for e in elements]))]
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(GlobalWindows.create()) \
.trigger(PurgingTrigger.of(CountTrigger.of(2))) \
.process(MyProcessFunction(), Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_global_window_with_purging_trigger')
results = self.test_sink.get_results()
expected = ['(hi,2)', '(hi,2)', '(hi,2)']
self.assert_equals_sorted(expected, results)
def test_event_time_tumbling_window_all(self):
data_stream = self.env.from_collection([
('hi', 1), ('hello', 2), ('hi', 3), ('hello', 4), ('hello', 5), ('hi', 8), ('hi', 9),
('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.window_all(TumblingEventTimeWindows.of(Time.milliseconds(5))) \
.process(CountAllWindowProcessFunction(),
Types.TUPLE([Types.LONG(), Types.LONG(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_event_time_tumbling_window_all')
results = self.test_sink.get_results()
expected = ['(0,5,4)', '(15,20,1)', '(5,10,3)']
self.assert_equals_sorted(expected, results)
def test_window_all_reduce(self):
self.env.set_parallelism(1)
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.window_all(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.reduce(lambda a, b: (a[0], a[1] + b[1]),
output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_window_all_reduce')
results = self.test_sink.get_results()
expected = ['(a,15)', '(a,6)', '(a,23)']
self.assert_equals_sorted(expected, results)
def test_window_all_reduce_process(self):
self.env.set_parallelism(1)
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
class MyProcessFunction(ProcessAllWindowFunction):
def process(self, context: 'ProcessAllWindowFunction.Context',
elements: Iterable[Tuple[str, int]]) -> Iterable[str]:
yield "current window start at {}, reduce result {}".format(
context.window().start,
next(iter(elements)),
)
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.window_all(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.reduce(lambda a, b: (a[0], a[1] + b[1]),
window_function=MyProcessFunction(),
output_type=Types.STRING()) \
.add_sink(self.test_sink)
self.env.execute('test_window_all_reduce_process')
results = self.test_sink.get_results()
expected = ["current window start at 1, reduce result ('a', 6)",
"current window start at 6, reduce result ('a', 23)",
"current window start at 15, reduce result ('a', 15)"]
self.assert_equals_sorted(expected, results)
def test_window_all_aggregate(self):
self.env.set_parallelism(1)
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
class MyAggregateFunction(AggregateFunction):
def create_accumulator(self) -> Tuple[str, Dict[int, int]]:
return '', {0: 0, 1: 0}
def add(self, value: Tuple[str, int], accumulator: Tuple[str, Dict[int, int]]
) -> Tuple[str, Dict[int, int]]:
number_map = accumulator[1]
number_map[value[1] % 2] += 1
return value[0], number_map
def get_result(self, accumulator: Tuple[str, Dict[int, int]]) -> Tuple[str, int]:
number_map = accumulator[1]
return accumulator[0], number_map[0] - number_map[1]
def merge(self, acc_a: Tuple[str, Dict[int, int]], acc_b: Tuple[str, Dict[int, int]]
) -> Tuple[str, Dict[int, int]]:
number_map_a = acc_a[1]
number_map_b = acc_b[1]
new_number_map = {
0: number_map_a[0] + number_map_b[0],
1: number_map_a[1] + number_map_b[1]
}
return acc_a[0], new_number_map
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.window_all(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.aggregate(MyAggregateFunction(),
output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \
.add_sink(self.test_sink)
self.env.execute('test_window_all_aggregate')
results = self.test_sink.get_results()
expected = ['(a,-1)', '(b,-1)', '(b,1)']
self.assert_equals_sorted(expected, results)
def test_window_all_aggregate_process(self):
self.env.set_parallelism(1)
data_stream = self.env.from_collection([
('a', 1), ('a', 2), ('b', 3), ('a', 6), ('b', 8), ('b', 9), ('a', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()])) # type: DataStream
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(SecondColumnTimestampAssigner())
class MyAggregateFunction(AggregateFunction):
def create_accumulator(self) -> Tuple[int, str]:
return 0, ''
def add(self, value: Tuple[str, int], accumulator: Tuple[int, str]) -> Tuple[int, str]:
return value[1] + accumulator[0], value[0]
def get_result(self, accumulator: Tuple[str, int]):
return accumulator[1], accumulator[0]
def merge(self, acc_a: Tuple[int, str], acc_b: Tuple[int, str]):
return acc_a[0] + acc_b[0], acc_a[1]
class MyProcessWindowFunction(ProcessAllWindowFunction):
def process(self, context: ProcessAllWindowFunction.Context,
elements: Iterable[Tuple[str, int]]) -> Iterable[str]:
agg_result = next(iter(elements))
yield "key {} timestamp sum {}".format(agg_result[0], agg_result[1])
data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.window_all(EventTimeSessionWindows.with_gap(Time.milliseconds(2))) \
.aggregate(MyAggregateFunction(),
window_function=MyProcessWindowFunction(),
accumulator_type=Types.TUPLE([Types.INT(), Types.STRING()]),
output_type=Types.STRING()) \
.add_sink(self.test_sink)
self.env.execute('test_window_all_aggregate_process')
results = self.test_sink.get_results()
expected = ['key b timestamp sum 6',
'key b timestamp sum 23',
'key a timestamp sum 15']
self.assert_equals_sorted(expected, results)
def test_side_output_late_data(self):
self.env.set_parallelism(1)
config = Configuration(
j_configuration=get_j_env_configuration(self.env._j_stream_execution_environment)
)
config.set_integer('python.fn-execution.bundle.size', 1)
jvm = get_gateway().jvm
watermark_strategy = WatermarkStrategy(
jvm.org.apache.flink.api.common.eventtime.WatermarkStrategy.forGenerator(
jvm.org.apache.flink.streaming.api.functions.python.eventtime.
PerElementWatermarkGenerator.getSupplier()
)
).with_timestamp_assigner(SecondColumnTimestampAssigner())
tag = OutputTag('late-data', type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds1 = self.env.from_collection([('a', 0), ('a', 8), ('a', 4), ('a', 6)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds2 = ds1.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda e: e[0]) \
.window(TumblingEventTimeWindows.of(Time.milliseconds(5))) \
.allowed_lateness(0) \
.side_output_late_data(tag) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.LONG(), Types.LONG(), Types.INT()]))
main_sink = DataStreamTestSinkFunction()
ds2.add_sink(main_sink)
side_sink = DataStreamTestSinkFunction()
ds2.get_side_output(tag).add_sink(side_sink)
self.env.execute('test_side_output_late_data')
main_expected = ['(a,0,5,1)', '(a,5,10,2)']
self.assert_equals_sorted(main_expected, main_sink.get_results())
side_expected = ['+I[a, 4]']
self.assert_equals_sorted(side_expected, side_sink.get_results())
class ProcessWindowTests(WindowTests, PyFlinkStreamingTestCase):
def setUp(self) -> None:
super(ProcessWindowTests, self).setUp()
config = get_j_env_configuration(self.env._j_stream_execution_environment)
config.setString("python.execution-mode", "process")
class EmbeddedWindowTests(WindowTests, PyFlinkStreamingTestCase):
def setUp(self) -> None:
super(EmbeddedWindowTests, self).setUp()
config = get_j_env_configuration(self.env._j_stream_execution_environment)
config.setString("python.execution-mode", "thread")
def test_chained_window(self):
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value: tuple, record_timestamp: int) -> int:
return value[0]
ds = self.env.from_collection(
[(1676461680000, "a1", "b1", 1), (1676461680000, "a1", "b1", 1),
(1676461680000, "a2", "b2", 1), (1676461680000, "a1", "b2", 1),
(1676461740000, "a1", "b1", 1), (1676461740000, "a2", "b2", 1)]
).assign_timestamps_and_watermarks(
WatermarkStrategy.for_monotonous_timestamps().with_timestamp_assigner(
MyTimestampAssigner())
)
ds.key_by(
lambda x: (x[0], x[1], x[2])
).window(
TumblingEventTimeWindows.of(Time.minutes(1))
).reduce(
lambda x, y: (x[0], x[1], x[2], x[3] + y[3]),
output_type=Types.TUPLE([Types.LONG(), Types.STRING(), Types.STRING(), Types.INT()])
).map(
lambda x: (x[0], x[1], x[3]),
output_type=Types.TUPLE([Types.LONG(), Types.STRING(), Types.INT()])
).add_sink(self.test_sink)
self.env.execute('test_chained_window')
results = self.test_sink.get_results()
expected = ['(1676461680000,a1,1)',
'(1676461680000,a1,2)',
'(1676461680000,a2,1)',
'(1676461740000,a1,1)',
'(1676461740000,a2,1)']
self.assert_equals_sorted(expected, results)
class SecondColumnTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[1])
class MySessionWindowTimeGapExtractor(SessionWindowTimeGapExtractor):
def extract(self, element: tuple) -> int:
return element[1]
class SumWindowFunction(WindowFunction[tuple, tuple, str, CountWindow]):
def apply(self, key: str, window: CountWindow, inputs: Iterable[tuple]):
result = 0
for i in inputs:
result += i[0]
return [(key, result)]
class CountWindowProcessFunction(ProcessWindowFunction[tuple, tuple, str, TimeWindow]):
def process(self,
key: str,
context: ProcessWindowFunction.Context[TimeWindow],
elements: Iterable[tuple]) -> Iterable[tuple]:
return [(key, context.window().start, context.window().end, len([e for e in elements]))]
class CountAllWindowProcessFunction(ProcessAllWindowFunction[tuple, tuple, TimeWindow]):
def process(self,
context: 'ProcessAllWindowFunction.Context',
elements: Iterable[tuple]) -> Iterable[tuple]:
return [(context.window().start, context.window().end, len([e for e in elements]))]
| 33,470 | 48.513314 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/base.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from enum import Enum
from typing import Union, Optional
from py4j.java_gateway import JavaObject
from pyflink.datastream.functions import JavaFunctionWrapper
from pyflink.java_gateway import get_gateway
class Source(JavaFunctionWrapper):
"""
Base class for all unified data source in Flink.
"""
def __init__(self, source: Union[str, JavaObject]):
"""
Constructor of Source.
:param source: The java Source object.
"""
super(Source, self).__init__(source)
class Sink(JavaFunctionWrapper):
"""
Base class for all unified data sink in Flink.
"""
def __init__(self, sink: Union[str, JavaObject]):
"""
Constructor of Sink.
:param sink: The java Sink object.
"""
super(Sink, self).__init__(sink)
class DeliveryGuarantee(Enum):
"""
DeliverGuarantees that can be chosen. In general your pipeline can only offer the lowest
delivery guarantee which is supported by your sources and sinks.
:data: `EXACTLY_ONCE`:
Records are only delivered exactly-once also under failover scenarios. To build a complete
exactly-once pipeline is required that the source and sink support exactly-once and are
properly configured.
:data: `AT_LEAST_ONCE`:
Records are ensured to be delivered but it may happen that the same record is delivered
multiple times. Usually, this guarantee is faster than the exactly-once delivery.
:data: `NONE`:
Records are delivered on a best effort basis. It is often the fastest way to process records
but it may happen that records are lost or duplicated.
"""
EXACTLY_ONCE = 0,
AT_LEAST_ONCE = 1,
NONE = 2
def _to_j_delivery_guarantee(self):
JDeliveryGuarantee = get_gateway().jvm \
.org.apache.flink.connector.base.DeliveryGuarantee
return getattr(JDeliveryGuarantee, self.name)
class StreamTransformer(ABC):
@abstractmethod
def apply(self, ds):
pass
class SupportsPreprocessing(ABC):
@abstractmethod
def get_transformer(self) -> Optional[StreamTransformer]:
pass
| 3,132 | 30.33 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/file_system.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import warnings
from abc import abstractmethod
from typing import TYPE_CHECKING, Optional
from pyflink.common.serialization import BulkWriterFactory, RowDataBulkWriterFactory
if TYPE_CHECKING:
from pyflink.table.types import RowType
from pyflink.common import Duration, Encoder
from pyflink.datastream.connectors import Source, Sink
from pyflink.datastream.connectors.base import SupportsPreprocessing, StreamTransformer
from pyflink.datastream.functions import SinkFunction
from pyflink.common.utils import JavaObjectWrapper
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import to_jarray, is_instance_of
__all__ = [
'FileCompactor',
'FileCompactStrategy',
'OutputFileConfig',
'FileSource',
'FileSourceBuilder',
'FileSink',
'StreamingFileSink',
'StreamFormat',
'BulkFormat',
'FileEnumeratorProvider',
'FileSplitAssignerProvider',
'RollingPolicy',
'BucketAssigner'
]
# ---- FileSource ----
class FileEnumeratorProvider(object):
"""
Factory for FileEnumerator which task is to discover all files to be read and to split them
into a set of file source splits. This includes possibly, path traversals, file filtering
(by name or other patterns) and deciding whether to split files into multiple splits, and
how to split them.
"""
def __init__(self, j_file_enumerator_provider):
self._j_file_enumerator_provider = j_file_enumerator_provider
@staticmethod
def default_splittable_file_enumerator() -> 'FileEnumeratorProvider':
"""
The default file enumerator used for splittable formats. The enumerator recursively
enumerates files, split files that consist of multiple distributed storage blocks into
multiple splits, and filters hidden files (files starting with '.' or '_'). Files with
suffixes of common compression formats (for example '.gzip', '.bz2', '.xy', '.zip', ...)
will not be split.
"""
JFileSource = get_gateway().jvm.org.apache.flink.connector.file.src.FileSource
return FileEnumeratorProvider(JFileSource.DEFAULT_SPLITTABLE_FILE_ENUMERATOR)
@staticmethod
def default_non_splittable_file_enumerator() -> 'FileEnumeratorProvider':
"""
The default file enumerator used for non-splittable formats. The enumerator recursively
enumerates files, creates one split for the file, and filters hidden files
(files starting with '.' or '_').
"""
JFileSource = get_gateway().jvm.org.apache.flink.connector.file.src.FileSource
return FileEnumeratorProvider(JFileSource.DEFAULT_NON_SPLITTABLE_FILE_ENUMERATOR)
class FileSplitAssignerProvider(object):
"""
Factory for FileSplitAssigner which is responsible for deciding what split should be
processed next by which node. It determines split processing order and locality.
"""
def __init__(self, j_file_split_assigner):
self._j_file_split_assigner = j_file_split_assigner
@staticmethod
def locality_aware_split_assigner() -> 'FileSplitAssignerProvider':
"""
A FileSplitAssigner that assigns to each host preferably splits that are local, before
assigning splits that are not local.
"""
JFileSource = get_gateway().jvm.org.apache.flink.connector.file.src.FileSource
return FileSplitAssignerProvider(JFileSource.DEFAULT_SPLIT_ASSIGNER)
class StreamFormat(object):
"""
A reader format that reads individual records from a stream.
Compared to the :class:`~BulkFormat`, the stream format handles a few things out-of-the-box,
like deciding how to batch records or dealing with compression.
Internally in the file source, the readers pass batches of records from the reading threads
(that perform the typically blocking I/O operations) to the async mailbox threads that do
the streaming and batch data processing. Passing records in batches
(rather than one-at-a-time) much reduces the thread-to-thread handover overhead.
This batching is by default based on I/O fetch size for the StreamFormat, meaning the
set of records derived from one I/O buffer will be handed over as one. See config option
`source.file.stream.io-fetch-size` to configure that fetch size.
"""
def __init__(self, j_stream_format):
self._j_stream_format = j_stream_format
@staticmethod
def text_line_format(charset_name: str = "UTF-8") -> 'StreamFormat':
"""
Creates a reader format that text lines from a file.
The reader uses Java's built-in java.io.InputStreamReader to decode the byte stream
using various supported charset encodings.
This format does not support optimized recovery from checkpoints. On recovery, it will
re-read and discard the number of lined that were processed before the last checkpoint.
That is due to the fact that the offsets of lines in the file cannot be tracked through
the charset decoders with their internal buffering of stream input and charset decoder
state.
:param charset_name: The charset to decode the byte stream.
"""
j_stream_format = get_gateway().jvm.org.apache.flink.connector.file.src.reader. \
TextLineInputFormat(charset_name)
return StreamFormat(j_stream_format)
class BulkFormat(object):
"""
The BulkFormat reads and decodes batches of records at a time. Examples of bulk formats are
formats like ORC or Parquet.
Internally in the file source, the readers pass batches of records from the reading threads
(that perform the typically blocking I/O operations) to the async mailbox threads that do the
streaming and batch data processing. Passing records in batches (rather than one-at-a-time) much
reduce the thread-to-thread handover overhead.
For the BulkFormat, one batch is handed over as one.
.. versionadded:: 1.16.0
"""
def __init__(self, j_bulk_format):
self._j_bulk_format = j_bulk_format
class FileSourceBuilder(object):
"""
The builder for the :class:`~FileSource`, to configure the various behaviors.
Start building the source via one of the following methods:
- :func:`~FileSource.for_record_stream_format`
"""
def __init__(self, j_file_source_builder):
self._j_file_source_builder = j_file_source_builder
def monitor_continuously(
self,
discovery_interval: Duration) -> 'FileSourceBuilder':
"""
Sets this source to streaming ("continuous monitoring") mode.
This makes the source a "continuous streaming" source that keeps running, monitoring
for new files, and reads these files when they appear and are discovered by the
monitoring.
The interval in which the source checks for new files is the discovery_interval. Shorter
intervals mean that files are discovered more quickly, but also imply more frequent
listing or directory traversal of the file system / object store.
"""
self._j_file_source_builder.monitorContinuously(discovery_interval._j_duration)
return self
def process_static_file_set(self) -> 'FileSourceBuilder':
"""
Sets this source to bounded (batch) mode.
In this mode, the source processes the files that are under the given paths when the
application is started. Once all files are processed, the source will finish.
This setting is also the default behavior. This method is mainly here to "switch back"
to bounded (batch) mode, or to make it explicit in the source construction.
"""
self._j_file_source_builder.processStaticFileSet()
return self
def set_file_enumerator(
self,
file_enumerator: 'FileEnumeratorProvider') -> 'FileSourceBuilder':
"""
Configures the FileEnumerator for the source. The File Enumerator is responsible
for selecting from the input path the set of files that should be processed (and which
to filter out). Furthermore, the File Enumerator may split the files further into
sub-regions, to enable parallelization beyond the number of files.
"""
self._j_file_source_builder.setFileEnumerator(
file_enumerator._j_file_enumerator_provider)
return self
def set_split_assigner(
self,
split_assigner: 'FileSplitAssignerProvider') -> 'FileSourceBuilder':
"""
Configures the FileSplitAssigner for the source. The File Split Assigner
determines which parallel reader instance gets which {@link FileSourceSplit}, and in
which order these splits are assigned.
"""
self._j_file_source_builder.setSplitAssigner(split_assigner._j_file_split_assigner)
return self
def build(self) -> 'FileSource':
"""
Creates the file source with the settings applied to this builder.
"""
return FileSource(self._j_file_source_builder.build())
class FileSource(Source):
"""
A unified data source that reads files - both in batch and in streaming mode.
This source supports all (distributed) file systems and object stores that can be accessed via
the Flink's FileSystem class.
Start building a file source via one of the following calls:
- :func:`~FileSource.for_record_stream_format`
This creates a :class:`~FileSource.FileSourceBuilder` on which you can configure all the
properties of the file source.
<h2>Batch and Streaming</h2>
This source supports both bounded/batch and continuous/streaming data inputs. For the
bounded/batch case, the file source processes all files under the given path(s). In the
continuous/streaming case, the source periodically checks the paths for new files and will start
reading those.
When you start creating a file source (via the
:class:`~FileSource.FileSourceBuilder` created through one of the above-mentioned methods)
the source is by default in bounded/batch mode. Call
:func:`~FileSource.FileSourceBuilder.monitor_continuously` to put the source into continuous
streaming mode.
<h2>Format Types</h2>
The reading of each file happens through file readers defined by <i>file formats</i>. These
define the parsing logic for the contents of the file. There are multiple classes that the
source supports. Their interfaces trade of simplicity of implementation and
flexibility/efficiency.
- A :class:`~FileSource.StreamFormat` reads the contents of a file from a file stream.
It is the simplest format to implement, and provides many features out-of-the-box
(like checkpointing logic) but is limited in the optimizations it
can apply (such as object reuse, batching, etc.).
<h2>Discovering / Enumerating Files</h2>
The way that the source lists the files to be processes is defined by the
:class:`~FileSource.FileEnumeratorProvider`. The FileEnumeratorProvider is responsible to
select the relevant files (for example filter out hidden files) and to optionally splits files
into multiple regions (= file source splits) that can be read in parallel).
"""
def __init__(self, j_file_source):
super(FileSource, self).__init__(source=j_file_source)
@staticmethod
def for_record_stream_format(stream_format: StreamFormat, *paths: str) -> FileSourceBuilder:
"""
Builds a new FileSource using a :class:`~FileSource.StreamFormat` to read record-by-record
from a file stream.
When possible, stream-based formats are generally easier (preferable) to file-based
formats, because they support better default behavior around I/O batching or progress
tracking (checkpoints).
Stream formats also automatically de-compress files based on the file extension. This
supports files ending in ".deflate" (Deflate), ".xz" (XZ), ".bz2" (BZip2), ".gz", ".gzip"
(GZip).
"""
JPath = get_gateway().jvm.org.apache.flink.core.fs.Path
JFileSource = get_gateway().jvm.org.apache.flink.connector.file.src.FileSource
j_paths = to_jarray(JPath, [JPath(p) for p in paths])
return FileSourceBuilder(
JFileSource.forRecordStreamFormat(stream_format._j_stream_format, j_paths))
@staticmethod
def for_bulk_file_format(bulk_format: BulkFormat, *paths: str) -> FileSourceBuilder:
JPath = get_gateway().jvm.org.apache.flink.core.fs.Path
JFileSource = get_gateway().jvm.org.apache.flink.connector.file.src.FileSource
j_paths = to_jarray(JPath, [JPath(p) for p in paths])
return FileSourceBuilder(
JFileSource.forBulkFileFormat(bulk_format._j_bulk_format, j_paths))
# ---- FileSink ----
class BucketAssigner(JavaObjectWrapper):
"""
A BucketAssigner is used with a file sink to determine the bucket each incoming element should
be put into.
The StreamingFileSink can be writing to many buckets at a time, and it is responsible
for managing a set of active buckets. Whenever a new element arrives it will ask the
BucketAssigner for the bucket the element should fall in. The BucketAssigner can, for
example, determine buckets based on system time.
"""
def __init__(self, j_bucket_assigner):
super().__init__(j_bucket_assigner)
@staticmethod
def base_path_bucket_assigner() -> 'BucketAssigner':
"""
Creates a BucketAssigner that does not perform any bucketing of files. All files are
written to the base path.
"""
return BucketAssigner(get_gateway().jvm.org.apache.flink.streaming.api.functions.sink.
filesystem.bucketassigners.BasePathBucketAssigner())
@staticmethod
def date_time_bucket_assigner(format_str: str = "yyyy-MM-dd--HH", timezone_id: str = None):
"""
Creates a BucketAssigner that assigns to buckets based on current system time.
It will create directories of the following form: /{basePath}/{dateTimePath}/}.
The basePath is the path that was specified as a base path when creating the new bucket.
The dateTimePath is determined based on the current system time and the user provided format
string.
The Java DateTimeFormatter is used to derive a date string from the current system time and
the date format string. The default format string is "yyyy-MM-dd--HH" so the rolling files
will have a granularity of hours.
:param format_str: The format string used to determine the bucket id.
:param timezone_id: The timezone id, either an abbreviation such as "PST", a full name
such as "America/Los_Angeles", or a custom timezone_id such as
"GMT-08:00". Th e default time zone will b used if it's None.
"""
if timezone_id is not None and isinstance(timezone_id, str):
j_timezone = get_gateway().jvm.java.time.ZoneId.of(timezone_id)
else:
j_timezone = get_gateway().jvm.java.time.ZoneId.systemDefault()
return BucketAssigner(
get_gateway().jvm.org.apache.flink.streaming.api.functions.sink.
filesystem.bucketassigners.DateTimeBucketAssigner(format_str, j_timezone))
class RollingPolicy(JavaObjectWrapper):
"""
The policy based on which a Bucket in the FileSink rolls its currently
open part file and opens a new one.
"""
def __init__(self, j_rolling_policy):
super().__init__(j_rolling_policy)
@staticmethod
def default_rolling_policy(
part_size: int = 1024 * 1024 * 128,
rollover_interval: int = 60 * 1000,
inactivity_interval: int = 60 * 1000) -> 'DefaultRollingPolicy':
"""
Returns the default implementation of the RollingPolicy.
This policy rolls a part file if:
- there is no open part file,
- the current file has reached the maximum bucket size (by default 128MB),
- the current file is older than the roll over interval (by default 60 sec), or
- the current file has not been written to for more than the allowed inactivityTime (by
default 60 sec).
:param part_size: The maximum part file size before rolling.
:param rollover_interval: The maximum time duration a part file can stay open before
rolling.
:param inactivity_interval: The time duration of allowed inactivity after which a part file
will have to roll.
"""
JDefaultRollingPolicy = get_gateway().jvm.org.apache.flink.streaming.api.functions.\
sink.filesystem.rollingpolicies.DefaultRollingPolicy
j_rolling_policy = JDefaultRollingPolicy.builder()\
.withMaxPartSize(part_size) \
.withRolloverInterval(rollover_interval) \
.withInactivityInterval(inactivity_interval) \
.build()
return DefaultRollingPolicy(j_rolling_policy)
@staticmethod
def on_checkpoint_rolling_policy() -> 'OnCheckpointRollingPolicy':
"""
Returns a RollingPolicy which rolls (ONLY) on every checkpoint.
"""
JOnCheckpointRollingPolicy = get_gateway().jvm.org.apache.flink.streaming.api.functions. \
sink.filesystem.rollingpolicies.OnCheckpointRollingPolicy
return OnCheckpointRollingPolicy(JOnCheckpointRollingPolicy.build())
class DefaultRollingPolicy(RollingPolicy):
"""
The default implementation of the RollingPolicy.
This policy rolls a part file if:
- there is no open part file,
- the current file has reached the maximum bucket size (by default 128MB),
- the current file is older than the roll over interval (by default 60 sec), or
- the current file has not been written to for more than the allowed inactivityTime (by
default 60 sec).
"""
def __init__(self, j_rolling_policy):
super().__init__(j_rolling_policy)
class OnCheckpointRollingPolicy(RollingPolicy):
"""
A RollingPolicy which rolls (ONLY) on every checkpoint.
"""
def __init__(self, j_rolling_policy):
super().__init__(j_rolling_policy)
class OutputFileConfig(JavaObjectWrapper):
"""
Part file name configuration.
This allow to define a prefix and a suffix to the part file name.
"""
@staticmethod
def builder():
return OutputFileConfig.OutputFileConfigBuilder()
def __init__(self, part_prefix: str, part_suffix: str):
filesystem = get_gateway().jvm.org.apache.flink.streaming.api.functions.sink.filesystem
self._j_output_file_config = filesystem.OutputFileConfig(part_prefix, part_suffix)
super().__init__(self._j_output_file_config)
def get_part_prefix(self) -> str:
"""
The prefix for the part name.
"""
return self._j_output_file_config.getPartPrefix()
def get_part_suffix(self) -> str:
"""
The suffix for the part name.
"""
return self._j_output_file_config.getPartSuffix()
class OutputFileConfigBuilder(object):
"""
A builder to create the part file configuration.
"""
def __init__(self):
self.part_prefix = "part"
self.part_suffix = ""
def with_part_prefix(self, prefix) -> 'OutputFileConfig.OutputFileConfigBuilder':
self.part_prefix = prefix
return self
def with_part_suffix(self, suffix) -> 'OutputFileConfig.OutputFileConfigBuilder':
self.part_suffix = suffix
return self
def build(self) -> 'OutputFileConfig':
return OutputFileConfig(self.part_prefix, self.part_suffix)
class FileCompactStrategy(JavaObjectWrapper):
"""
Strategy for compacting the files written in {@link FileSink} before committing.
.. versionadded:: 1.16.0
"""
def __init__(self, j_file_compact_strategy):
super().__init__(j_file_compact_strategy)
@staticmethod
def builder() -> 'FileCompactStrategy.Builder':
return FileCompactStrategy.Builder()
class Builder(object):
def __init__(self):
JFileCompactStrategy = get_gateway().jvm.org.apache.flink.connector.file.sink.\
compactor.FileCompactStrategy
self._j_builder = JFileCompactStrategy.Builder.newBuilder()
def build(self) -> 'FileCompactStrategy':
return FileCompactStrategy(self._j_builder.build())
def enable_compaction_on_checkpoint(self, num_checkpoints_before_compaction: int) \
-> 'FileCompactStrategy.Builder':
"""
Optional, compaction will be triggered when N checkpoints passed since the last
triggering, -1 by default indicating no compaction on checkpoint.
"""
self._j_builder.enableCompactionOnCheckpoint(num_checkpoints_before_compaction)
return self
def set_size_threshold(self, size_threshold: int) -> 'FileCompactStrategy.Builder':
"""
Optional, compaction will be triggered when the total size of compacting files reaches
the threshold. -1 by default, indicating the size is unlimited.
"""
self._j_builder.setSizeThreshold(size_threshold)
return self
def set_num_compact_threads(self, num_compact_threads: int) \
-> 'FileCompactStrategy.Builder':
"""
Optional, the count of compacting threads in a compactor operator, 1 by default.
"""
self._j_builder.setNumCompactThreads(num_compact_threads)
return self
class FileCompactor(JavaObjectWrapper):
"""
The FileCompactor is responsible for compacting files into one file.
.. versionadded:: 1.16.0
"""
def __init__(self, j_file_compactor):
super().__init__(j_file_compactor)
@staticmethod
def concat_file_compactor(file_delimiter: bytes = None):
"""
Returns a file compactor that simply concat the compacting files. The file_delimiter will be
added between neighbouring files if provided.
"""
JConcatFileCompactor = get_gateway().jvm.org.apache.flink.connector.file.sink.compactor.\
ConcatFileCompactor
if file_delimiter:
return FileCompactor(JConcatFileCompactor(file_delimiter))
else:
return FileCompactor(JConcatFileCompactor())
@staticmethod
def identical_file_compactor():
"""
Returns a file compactor that directly copy the content of the only input file to the
output.
"""
JIdenticalFileCompactor = get_gateway().jvm.org.apache.flink.connector.file.sink.compactor.\
IdenticalFileCompactor
return FileCompactor(JIdenticalFileCompactor())
class FileSink(Sink, SupportsPreprocessing):
"""
A unified sink that emits its input elements to FileSystem files within buckets. This
sink achieves exactly-once semantics for both BATCH and STREAMING.
When creating the sink a basePath must be specified. The base directory contains one
directory for every bucket. The bucket directories themselves contain several part files, with
at least one for each parallel subtask of the sink which is writing data to that bucket.
These part files contain the actual output data.
The sink uses a BucketAssigner to determine in which bucket directory each element
should be written to inside the base directory. The BucketAssigner can, for example, roll
on every checkpoint or use time or a property of the element to determine the bucket directory.
The default BucketAssigner is a DateTimeBucketAssigner which will create one new
bucket every hour. You can specify a custom BucketAssigner using the
:func:`~FileSink.RowFormatBuilder.with_bucket_assigner`, after calling
:class:`~FileSink.for_row_format`.
The names of the part files could be defined using OutputFileConfig. This
configuration contains a part prefix and a part suffix that will be used with a random uid
assigned to each subtask of the sink and a rolling counter to determine the file names. For
example with a prefix "prefix" and a suffix ".ext", a file named {@code
"prefix-81fc4980-a6af-41c8-9937-9939408a734b-17.ext"} contains the data from subtask with uid
{@code 81fc4980-a6af-41c8-9937-9939408a734b} of the sink and is the {@code 17th} part-file
created by that subtask.
Part files roll based on the user-specified RollingPolicy. By default, a DefaultRollingPolicy
is used for row-encoded sink output; a OnCheckpointRollingPolicy is
used for bulk-encoded sink output.
In some scenarios, the open buckets are required to change based on time. In these cases, the
user can specify a bucket_check_interval (by default 1m) and the sink will check
periodically and roll the part file if the specified rolling policy says so.
Part files can be in one of three states: in-progress, pending or finished. The reason for this
is how the sink works to provide exactly-once semantics and fault-tolerance. The part file that
is currently being written to is in-progress. Once a part file is closed for writing it becomes
pending. When a checkpoint is successful (for STREAMING) or at the end of the job (for BATCH)
the currently pending files will be moved to finished.
For STREAMING in order to guarantee exactly-once semantics in case of a failure, the
sink should roll back to the state it had when that last successful checkpoint occurred. To this
end, when restoring, the restored files in pending state are transferred into the finished state
while any in-progress files are rolled back, so that they do not contain data that arrived after
the checkpoint from which we restore.
"""
def __init__(self, j_file_sink, transformer: Optional[StreamTransformer] = None):
super(FileSink, self).__init__(sink=j_file_sink)
self._transformer = transformer
def get_transformer(self) -> Optional[StreamTransformer]:
return self._transformer
class BaseBuilder(object):
def __init__(self, j_builder):
self._j_builder = j_builder
def with_bucket_check_interval(self, interval: int):
"""
:param interval: The check interval in milliseconds.
"""
self._j_builder.withBucketCheckInterval(interval)
return self
def with_bucket_assigner(self, bucket_assigner: BucketAssigner):
self._j_builder.withBucketAssigner(bucket_assigner.get_java_object())
return self
def with_output_file_config(self, output_file_config: OutputFileConfig):
self._j_builder.withOutputFileConfig(output_file_config.get_java_object())
return self
def enable_compact(self, strategy: FileCompactStrategy, compactor: FileCompactor):
self._j_builder.enableCompact(strategy.get_java_object(), compactor.get_java_object())
return self
def disable_compact(self):
self._j_builder.disableCompact()
return self
@abstractmethod
def with_rolling_policy(self, rolling_policy):
pass
def build(self):
return FileSink(self._j_builder.build())
class RowFormatBuilder(BaseBuilder):
"""
Builder for the vanilla FileSink using a row format.
.. versionchanged:: 1.16.0
Support compaction.
"""
def __init__(self, j_row_format_builder):
super().__init__(j_row_format_builder)
def with_rolling_policy(self, rolling_policy: RollingPolicy):
self._j_builder.withRollingPolicy(rolling_policy.get_java_object())
return self
@staticmethod
def for_row_format(base_path: str, encoder: Encoder) -> 'FileSink.RowFormatBuilder':
JPath = get_gateway().jvm.org.apache.flink.core.fs.Path
JFileSink = get_gateway().jvm.org.apache.flink.connector.file.sink.FileSink
return FileSink.RowFormatBuilder(
JFileSink.forRowFormat(JPath(base_path), encoder._j_encoder))
class BulkFormatBuilder(BaseBuilder):
"""
Builder for the vanilla FileSink using a bulk format.
.. versionadded:: 1.16.0
"""
def __init__(self, j_bulk_format_builder):
super().__init__(j_bulk_format_builder)
self._transformer = None
def with_rolling_policy(self, rolling_policy: OnCheckpointRollingPolicy):
if not isinstance(rolling_policy, OnCheckpointRollingPolicy):
raise ValueError('rolling_policy must be OnCheckpointRollingPolicy for bulk format')
return self
def _with_row_type(self, row_type: 'RowType') -> 'FileSink.BulkFormatBuilder':
from pyflink.datastream.data_stream import DataStream
from pyflink.table.types import _to_java_data_type
def _check_if_row_data_type(ds) -> bool:
j_type_info = ds._j_data_stream.getType()
if not is_instance_of(
j_type_info,
'org.apache.flink.table.runtime.typeutils.InternalTypeInfo'
):
return False
return is_instance_of(
j_type_info.toLogicalType(),
'org.apache.flink.table.types.logical.RowType'
)
class RowRowTransformer(StreamTransformer):
def apply(self, ds):
jvm = get_gateway().jvm
if _check_if_row_data_type(ds):
return ds
j_map_function = jvm.org.apache.flink.python.util.PythonConnectorUtils \
.RowRowMapper(_to_java_data_type(row_type))
return DataStream(ds._j_data_stream.process(j_map_function))
self._transformer = RowRowTransformer()
return self
def build(self) -> 'FileSink':
return FileSink(self._j_builder.build(), self._transformer)
@staticmethod
def for_bulk_format(base_path: str, writer_factory: BulkWriterFactory) \
-> 'FileSink.BulkFormatBuilder':
jvm = get_gateway().jvm
j_path = jvm.org.apache.flink.core.fs.Path(base_path)
JFileSink = jvm.org.apache.flink.connector.file.sink.FileSink
builder = FileSink.BulkFormatBuilder(
JFileSink.forBulkFormat(j_path, writer_factory.get_java_object())
)
if isinstance(writer_factory, RowDataBulkWriterFactory):
return builder._with_row_type(writer_factory.get_row_type())
else:
return builder
# ---- StreamingFileSink ----
class StreamingFileSink(SinkFunction):
"""
Sink that emits its input elements to `FileSystem` files within buckets. This is
integrated with the checkpointing mechanism to provide exactly once semantics.
When creating the sink a `basePath` must be specified. The base directory contains
one directory for every bucket. The bucket directories themselves contain several part files,
with at least one for each parallel subtask of the sink which is writing data to that bucket.
These part files contain the actual output data.
"""
def __init__(self, j_obj):
warnings.warn("Deprecated in 1.15. Use FileSink instead.", DeprecationWarning)
super(StreamingFileSink, self).__init__(j_obj)
class BaseBuilder(object):
def __init__(self, j_builder):
self._j_builder = j_builder
def with_bucket_check_interval(self, interval: int):
self._j_builder.withBucketCheckInterval(interval)
return self
def with_bucket_assigner(self, bucket_assigner: BucketAssigner):
self._j_builder.withBucketAssigner(bucket_assigner.get_java_object())
return self
@abstractmethod
def with_rolling_policy(self, policy):
pass
def with_output_file_config(self, output_file_config: OutputFileConfig):
self._j_builder.withOutputFileConfig(output_file_config.get_java_object())
return self
def build(self) -> 'StreamingFileSink':
j_stream_file_sink = self._j_builder.build()
return StreamingFileSink(j_stream_file_sink)
class DefaultRowFormatBuilder(BaseBuilder):
"""
Builder for the vanilla `StreamingFileSink` using a row format.
"""
def __init__(self, j_default_row_format_builder):
super().__init__(j_default_row_format_builder)
def with_rolling_policy(self, policy: RollingPolicy):
self._j_builder.withRollingPolicy(policy.get_java_object())
return self
@staticmethod
def for_row_format(base_path: str, encoder: Encoder) -> 'DefaultRowFormatBuilder':
j_path = get_gateway().jvm.org.apache.flink.core.fs.Path(base_path)
j_default_row_format_builder = get_gateway().jvm.org.apache.flink.streaming.api.\
functions.sink.filesystem.StreamingFileSink.forRowFormat(j_path, encoder._j_encoder)
return StreamingFileSink.DefaultRowFormatBuilder(j_default_row_format_builder)
class DefaultBulkFormatBuilder(BaseBuilder):
def __init__(self, j_default_bulk_format_builder):
super().__init__(j_default_bulk_format_builder)
def with_rolling_policy(self, policy: OnCheckpointRollingPolicy):
self._j_builder.withRollingPolicy(policy.get_java_object())
return self
@staticmethod
def for_bulk_format(base_path: str, writer_factory: BulkWriterFactory):
jvm = get_gateway().jvm
j_path = jvm.org.apache.flink.core.fs.Path(base_path)
j_default_bulk_format_builder = jvm.org.apache.flink.streaming.api.functions.sink \
.filesystem.StreamingFileSink.forBulkFormat(j_path, writer_factory.get_java_object())
return StreamingFileSink.DefaultBulkFormatBuilder(j_default_bulk_format_builder)
| 35,405 | 41.200238 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/cassandra.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from pyflink.common import Duration
from pyflink.java_gateway import get_gateway
__all__ = [
'CassandraSink',
'ConsistencyLevel',
'MapperOptions',
'ClusterBuilder',
'CassandraCommitter',
'CassandraFailureHandler'
]
# ---- Classes introduced to construct the MapperOptions ----
class ConsistencyLevel(Enum):
"""
The consistency level
"""
ANY = 0
ONE = 1
TWO = 2
THREE = 3
QUORUM = 4
ALL = 5
LOCAL_QUORUM = 6
EACH_QUORUM = 7
SERIAL = 8
LOCAL_SERIAL = 9
LOCAL_ONE = 10
def _to_j_consistency_level(self):
JConsistencyLevel = get_gateway().jvm.com.datastax.driver.core.ConsistencyLevel
return getattr(JConsistencyLevel, self.name)
class MapperOptions(object):
"""
This class is used to configure a Mapper after deployment.
"""
def __init__(self):
"""
A simple method to construct MapperOptions.
Example:
::
>>> mapper_option = MapperOptions() \\
... .ttl(1800) \\
... .timestamp(3600) \\
... .consistency_level(ConsistencyLevel.ANY) \\
... .tracing(True) \\
... .save_null_fields(True)
"""
JSimpleMapperOptions = get_gateway().jvm.org.apache.flink.streaming.connectors. \
cassandra.SimpleMapperOptions
self._j_mapper_options = JSimpleMapperOptions()
def ttl(self, ttl: int) -> 'MapperOptions':
"""
Creates a new Option object to add time-to-live to a mapper operation. This is only
valid for save operations.
"""
self._j_mapper_options.ttl(ttl)
return self
def timestamp(self, timestamp: int) -> 'MapperOptions':
"""
Creates a new Option object to add a timestamp to a mapper operation. This is only
valid for save and delete operations.
"""
self._j_mapper_options.timestamp(timestamp)
return self
def consistency_level(self, cl: ConsistencyLevel) -> 'MapperOptions':
"""
Creates a new Option object to add a consistency level value to a mapper operation.
This is valid for save, delete and get operations.
"""
self._j_mapper_options.consistencyLevel(cl._to_j_consistency_level())
return self
def tracing(self, enabled: bool) -> 'MapperOptions':
"""
Creates a new Option object to enable query tracing for a mapper operation. This is
valid for save, delete and get operations.
"""
self._j_mapper_options.tracing(enabled)
return self
def save_null_fields(self, enabled: bool) -> 'MapperOptions':
"""
Creates a new Option object to specify whether null entity fields should be included in
insert queries. This option is valid only for save operations.
"""
self._j_mapper_options.saveNullFields(enabled)
return self
def if_not_exists(self, enabled: bool) -> 'MapperOptions':
"""
Creates a new Option object to specify whether an IF NOT EXISTS clause should be included in
insert queries. This option is valid only for save operations.
If this option is not specified, it defaults to false (IF NOT EXISTS statements are not
used).
"""
self._j_mapper_options.ifNotExists(enabled)
return self
class ClusterBuilder(object):
"""
This class is used to configure a Cluster after deployment. The cluster represents the
connection that will be established to Cassandra.
"""
def __init__(self, j_cluster_builder):
self._j_cluster_builder = j_cluster_builder
class CassandraCommitter(object):
"""
CheckpointCommitter that saves information about completed checkpoints within a separate table
in a cassandra database.
"""
def __init__(self, j_checkpoint_committer):
self._j_checkpoint_committer = j_checkpoint_committer
@staticmethod
def default_checkpoint_committer(builder: ClusterBuilder, key_space: str = None) \
-> 'CassandraCommitter':
"""
CheckpointCommitter that saves information about completed checkpoints within a separate
table in a cassandra database.
Entries are in the form: | operator_id | subtask_id | last_completed_checkpoint |
"""
JCassandraCommitter = get_gateway().jvm.org.apache.flink.streaming.connectors.\
cassandra.CassandraCommitter
if key_space is None:
j_checkpoint_committer = JCassandraCommitter(builder._j_cluster_builder)
else:
j_checkpoint_committer = JCassandraCommitter(builder._j_cluster_builder, key_space)
return CassandraCommitter(j_checkpoint_committer)
class CassandraFailureHandler(object):
"""
Handle a failed Throwable.
"""
def __init__(self, j_cassandra_failure_handler):
self._j_cassandra_failure_handler = j_cassandra_failure_handler
@staticmethod
def no_op() -> 'CassandraFailureHandler':
"""
A CassandraFailureHandler that simply fails the sink on any failures.
This is also the default failure handler if not specified.
"""
return CassandraFailureHandler(get_gateway().jvm.org.apache.flink.streaming.connectors.
cassandra.NoOpCassandraFailureHandler())
# ---- CassandraSink ----
class CassandraSink(object):
"""
Sets the ClusterBuilder for this sink. A ClusterBuilder is used to configure the connection to
cassandra.
"""
def __init__(self, j_cassandra_sink):
self._j_cassandra_sink = j_cassandra_sink
def name(self, name: str) -> 'CassandraSink':
"""
Set the name of this sink. This name is used by the visualization and logging during
runtime.
"""
self._j_cassandra_sink.name(name)
return self
def uid(self, uid: str) -> 'CassandraSink':
"""
Sets an ID for this operator. The specified ID is used to assign the same operator ID
across job submissions (for example when starting a job from a savepoint).
Note that this ID needs to be unique per transformation and job. Otherwise, job submission
will fail.
"""
self._j_cassandra_sink.uid(uid)
return self
def set_uid_hash(self, uid_hash: str) -> 'CassandraSink':
"""
Sets an user provided hash for this operator. This will be used AS IS the create the
JobVertexID.
The user provided hash is an alternative to the generated hashes, that is considered when
identifying an operator through the default hash mechanics fails (e.g. because of changes
between Flink versions).
Note that this should be used as a workaround or for trouble shooting. The provided hash
needs to be unique per transformation and job. Otherwise, job submission will fail.
Furthermore, you cannot assign user-specified hash to intermediate nodes in an operator
chain and trying so will let your job fail.
A use case for this is in migration between Flink versions or changing the jobs in a way
that changes the automatically generated hashes. In this case, providing the previous hashes
directly through this method (e.g. obtained from old logs) can help to reestablish a lost
mapping from states to their target operator.
"""
self._j_cassandra_sink.setUidHash(uid_hash)
return self
def set_parallelism(self, parallelism: int) -> 'CassandraSink':
"""
Sets the parallelism for this sink. The degree must be higher than zero.
"""
self._j_cassandra_sink.setParallelism(parallelism)
return self
def disable_chaining(self) -> 'CassandraSink':
"""
Turns off chaining for this operator so thread co-location will not be used as an
optimization.
"""
self._j_cassandra_sink.disableChaining()
return self
def slot_sharing_group(self, slot_sharing_group: str) -> 'CassandraSink':
"""
Sets the slot sharing group of this operation. Parallel instances of operations that are in
the same slot sharing group will be co-located in the same TaskManager slot, if possible.
Operations inherit the slot sharing group of input operations if all input operations are in
the same slot sharing group and no slot sharing group was explicitly specified.
Initially an operation is in the default slot sharing group. An operation can be put into
the default group explicitly by setting the slot sharing group to {@code "default"}.
"""
self._j_cassandra_sink.slotSharingGroup(slot_sharing_group)
return self
@staticmethod
def add_sink(input) -> 'CassandraSinkBuilder':
"""
Writes a DataStream into a Cassandra database.
"""
JCassandraSink = get_gateway().jvm \
.org.apache.flink.streaming.connectors.cassandra.CassandraSink
j_cassandra_sink_builder = JCassandraSink.addSink(input._j_data_stream)
return CassandraSink.CassandraSinkBuilder(j_cassandra_sink_builder)
class CassandraSinkBuilder(object):
"""
Builder for a CassandraSink.
"""
def __init__(self, j_cassandra_sink_builder):
self._j_cassandra_sink_builder = j_cassandra_sink_builder
def set_query(self, query: str) -> 'CassandraSink.CassandraSinkBuilder':
"""
Sets the query that is to be executed for every record.
"""
self._j_cassandra_sink_builder.setQuery(query)
return self
def set_host(self, host: str, port: int = 9042) -> 'CassandraSink.CassandraSinkBuilder':
"""
Sets the cassandra host/port to connect to.
"""
self._j_cassandra_sink_builder.setHost(host, port)
return self
def set_cluster_builder(self, builder: ClusterBuilder) \
-> 'CassandraSink.CassandraSinkBuilder':
"""
Sets the ClusterBuilder for this sink. A ClusterBuilder is used to configure the
connection to cassandra.
"""
self._j_cassandra_sink_builder.setClusterBuilder(builder._j_cluster_builder)
return self
def enable_write_ahead_log(self, committer: CassandraCommitter = None) \
-> 'CassandraSink.CassandraSinkBuilder':
"""
Enables the write-ahead log, which allows exactly-once processing for non-deterministic
algorithms that use idempotent updates.
"""
if committer is None:
self._j_cassandra_sink_builder.enableWriteAheadLog()
else:
self._j_cassandra_sink_builder.enableWriteAheadLog(
committer._j_checkpoint_committer)
return self
def set_mapper_options(self, options: MapperOptions) \
-> 'CassandraSink.CassandraSinkBuilder':
"""
Sets the mapper options for this sink. The mapper options are used to configure the
DataStax com.datastax.driver.mapping.Mapper when writing POJOs.
This call has no effect if the input DataStream for this sink does not contain POJOs.
"""
self._j_cassandra_sink_builder.setMapperOptions(options._j_mapper_options)
return self
def set_failure_handler(self, failure_handler: CassandraFailureHandler) \
-> 'CassandraSink.CassandraSinkBuilder':
"""
Sets the failure handler for this sink. The failure handler is used to provide custom
error handling.
"""
self._j_cassandra_sink_builder.setFailureHandler(
failure_handler._j_cassandra_failure_handler)
return self
def set_max_concurrent_requests(self,
max_concurrent_requests: int,
duration: Duration = None) \
-> 'CassandraSink.CassandraSinkBuilder':
"""
Sets the maximum allowed number of concurrent requests for this sink.
"""
if duration is None:
self._j_cassandra_sink_builder.setMaxConcurrentRequests(max_concurrent_requests)
else:
self._j_cassandra_sink_builder.setMaxConcurrentRequests(
max_concurrent_requests, duration._j_duration)
return self
def enable_ignore_null_fields(self) -> 'CassandraSink.CassandraSinkBuilder':
"""
Enables ignoring null values, treats null values as unset and avoids writing null fields
and creating tombstones.
This call has no effect if CassandraSinkBuilder.enableWriteAheadLog() is called.
"""
self._j_cassandra_sink_builder.enableIgnoreNullFields()
return self
def build(self) -> 'CassandraSink':
"""
Finalizes the configuration of this sink.
"""
return CassandraSink(self._j_cassandra_sink_builder.build())
| 14,269 | 37.567568 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/kafka.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import warnings
from abc import ABC, abstractmethod
from enum import Enum
from typing import Dict, Union, List, Set, Callable, Any, Optional
from py4j.java_gateway import JavaObject, get_java_class
from pyflink.common import DeserializationSchema, TypeInformation, typeinfo, SerializationSchema, \
Types, Row
from pyflink.datastream.connectors import Source, Sink
from pyflink.datastream.connectors.base import DeliveryGuarantee, SupportsPreprocessing, \
StreamTransformer
from pyflink.datastream.functions import SinkFunction, SourceFunction
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import to_jarray, get_field, get_field_value
__all__ = [
'FlinkKafkaConsumer',
'FlinkKafkaProducer',
'KafkaSource',
'KafkaSourceBuilder',
'KafkaSink',
'KafkaSinkBuilder',
'Semantic',
'KafkaTopicPartition',
'KafkaOffsetsInitializer',
'KafkaOffsetResetStrategy',
'KafkaRecordSerializationSchema',
'KafkaRecordSerializationSchemaBuilder',
'KafkaTopicSelector'
]
# ---- FlinkKafkaConsumer ----
class FlinkKafkaConsumerBase(SourceFunction, ABC):
"""
Base class of all Flink Kafka Consumer data sources. This implements the common behavior across
all kafka versions.
The Kafka version specific behavior is defined mainly in the specific subclasses.
"""
def __init__(self, j_flink_kafka_consumer):
super(FlinkKafkaConsumerBase, self).__init__(source_func=j_flink_kafka_consumer)
def set_commit_offsets_on_checkpoints(self,
commit_on_checkpoints: bool) -> 'FlinkKafkaConsumerBase':
"""
Specifies whether or not the consumer should commit offsets back to kafka on checkpoints.
This setting will only have effect if checkpointing is enabled for the job. If checkpointing
isn't enabled, only the "auto.commit.enable" (for 0.8) / "enable.auto.commit" (for 0.9+)
property settings will be used.
"""
self._j_function = self._j_function \
.setCommitOffsetsOnCheckpoints(commit_on_checkpoints)
return self
def set_start_from_earliest(self) -> 'FlinkKafkaConsumerBase':
"""
Specifies the consumer to start reading from the earliest offset for all partitions. This
lets the consumer ignore any committed group offsets in Zookeeper/ Kafka brokers.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
"""
self._j_function = self._j_function.setStartFromEarliest()
return self
def set_start_from_latest(self) -> 'FlinkKafkaConsumerBase':
"""
Specifies the consuer to start reading from the latest offset for all partitions. This lets
the consumer ignore any committed group offsets in Zookeeper / Kafka brokers.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
"""
self._j_function = self._j_function.setStartFromLatest()
return self
def set_start_from_timestamp(self, startup_offsets_timestamp: int) -> 'FlinkKafkaConsumerBase':
"""
Specifies the consumer to start reading partitions from a specified timestamp. The specified
timestamp must be before the current timestamp. This lets the consumer ignore any committed
group offsets in Zookeeper / Kafka brokers.
The consumer will look up the earliest offset whose timestamp is greater than or equal to
the specific timestamp from Kafka. If there's no such offset, the consumer will use the
latest offset to read data from Kafka.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
:param startup_offsets_timestamp: timestamp for the startup offsets, as milliseconds for
epoch.
"""
self._j_function = self._j_function.setStartFromTimestamp(
startup_offsets_timestamp)
return self
def set_start_from_group_offsets(self) -> 'FlinkKafkaConsumerBase':
"""
Specifies the consumer to start reading from any committed group offsets found in Zookeeper/
Kafka brokers. The 'group.id' property must be set in the configuration properties. If no
offset can be found for a partition, the behaviour in 'auto.offset.reset' set in the
configuration properties will be used for the partition.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
"""
self._j_function = self._j_function.setStartFromGroupOffsets()
return self
def disable_filter_restored_partitions_with_subscribed_topics(self) -> 'FlinkKafkaConsumerBase':
"""
By default, when restoring from a checkpoint / savepoint, the consumer always ignores
restored partitions that are no longer associated with the current specified topics or topic
pattern to subscribe to.
This method does not affect where partitions are read from when the consumer is restored
from a checkpoint or savepoint. When the consumer is restored from a checkpoint or
savepoint, only the offsets in the restored state will be used.
"""
self._j_function = self._j_function \
.disableFilterRestoredPartitionsWithSubscribedTopics()
return self
def get_produced_type(self) -> TypeInformation:
return typeinfo._from_java_type(self._j_function.getProducedType())
def _get_kafka_consumer(topics, properties, deserialization_schema, j_consumer_clz):
if not isinstance(topics, list):
topics = [topics]
gateway = get_gateway()
j_properties = gateway.jvm.java.util.Properties()
for key, value in properties.items():
j_properties.setProperty(key, value)
j_flink_kafka_consumer = j_consumer_clz(topics,
deserialization_schema._j_deserialization_schema,
j_properties)
return j_flink_kafka_consumer
class FlinkKafkaConsumer(FlinkKafkaConsumerBase):
"""
The Flink Kafka Consumer is a streaming data source that pulls a parallel data stream from
Apache Kafka. The consumer can run in multiple parallel instances, each of which will
pull data from one or more Kafka partitions.
The Flink Kafka Consumer participates in checkpointing and guarantees that no data is lost
during a failure, and that the computation processes elements 'exactly once. (These guarantees
naturally assume that Kafka itself does not lose any data.)
Please note that Flink snapshots the offsets internally as part of its distributed checkpoints.
The offsets committed to Kafka / Zookeeper are only to bring the outside view of progress in
sync with Flink's view of the progress. That way, monitoring and other jobs can get a view of
how far the Flink Kafka consumer has consumed a topic.
Please refer to Kafka's documentation for the available configuration properties:
http://kafka.apache.org/documentation.html#newconsumerconfigs
"""
def __init__(self, topics: Union[str, List[str]], deserialization_schema: DeserializationSchema,
properties: Dict):
"""
Creates a new Kafka streaming source consumer for Kafka 0.10.x.
This constructor allows passing multiple topics to the consumer.
:param topics: The Kafka topics to read from.
:param deserialization_schema: The de-/serializer used to convert between Kafka's byte
messages and Flink's objects.
:param properties: The properties that are used to configure both the fetcher and the offset
handler.
"""
warnings.warn("Deprecated in 1.16. Use KafkaSource instead.", DeprecationWarning)
JFlinkKafkaConsumer = get_gateway().jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
j_flink_kafka_consumer = _get_kafka_consumer(topics, properties, deserialization_schema,
JFlinkKafkaConsumer)
super(FlinkKafkaConsumer, self).__init__(j_flink_kafka_consumer=j_flink_kafka_consumer)
# ---- FlinkKafkaProducer ----
class Semantic(Enum):
"""
Semantics that can be chosen.
:data: `EXACTLY_ONCE`:
The Flink producer will write all messages in a Kafka transaction that will be committed to
the Kafka on a checkpoint. In this mode FlinkKafkaProducer sets up a pool of
FlinkKafkaProducer. Between each checkpoint there is created new Kafka transaction, which is
being committed on FlinkKafkaProducer.notifyCheckpointComplete(long). If checkpoint
complete notifications are running late, FlinkKafkaProducer can run out of
FlinkKafkaProducers in the pool. In that case any subsequent FlinkKafkaProducer.snapshot-
State() requests will fail and the FlinkKafkaProducer will keep using the
FlinkKafkaProducer from previous checkpoint. To decrease chances of failing checkpoints
there are four options:
1. decrease number of max concurrent checkpoints
2. make checkpoints mre reliable (so that they complete faster)
3. increase delay between checkpoints
4. increase size of FlinkKafkaProducers pool
:data: `AT_LEAST_ONCE`:
The Flink producer will wait for all outstanding messages in the Kafka buffers to be
acknowledged by the Kafka producer on a checkpoint.
:data: `NONE`:
Means that nothing will be guaranteed. Messages can be lost and/or duplicated in case of
failure.
"""
EXACTLY_ONCE = 0,
AT_LEAST_ONCE = 1,
NONE = 2
def _to_j_semantic(self):
JSemantic = get_gateway().jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.Semantic
return getattr(JSemantic, self.name)
class FlinkKafkaProducerBase(SinkFunction, ABC):
"""
Flink Sink to produce data into a Kafka topic.
Please note that this producer provides at-least-once reliability guarantees when checkpoints
are enabled and set_flush_on_checkpoint(True) is set. Otherwise, the producer doesn;t provid any
reliability guarantees.
"""
def __init__(self, j_flink_kafka_producer):
super(FlinkKafkaProducerBase, self).__init__(sink_func=j_flink_kafka_producer)
def set_log_failures_only(self, log_failures_only: bool) -> 'FlinkKafkaProducerBase':
"""
Defines whether the producer should fail on errors, or only log them. If this is set to
true, then exceptions will be only logged, if set to false, exceptions will be eventually
thrown and cause the streaming program to fail (and enter recovery).
:param log_failures_only: The flag to indicate logging-only on exceptions.
"""
self._j_function.setLogFailuresOnly(log_failures_only)
return self
def set_flush_on_checkpoint(self, flush_on_checkpoint: bool) -> 'FlinkKafkaProducerBase':
"""
If set to true, the Flink producer will wait for all outstanding messages in the Kafka
buffers to be acknowledged by the Kafka producer on a checkpoint.
This way, the producer can guarantee that messages in the Kafka buffers are part of the
checkpoint.
:param flush_on_checkpoint: Flag indicating the flush mode (true = flush on checkpoint)
"""
self._j_function.setFlushOnCheckpoint(flush_on_checkpoint)
return self
def set_write_timestamp_to_kafka(self,
write_timestamp_to_kafka: bool) -> 'FlinkKafkaProducerBase':
"""
If set to true, Flink will write the (event time) timestamp attached to each record into
Kafka. Timestamps must be positive for Kafka to accept them.
:param write_timestamp_to_kafka: Flag indicating if Flink's internal timestamps are written
to Kafka.
"""
self._j_function.setWriteTimestampToKafka(write_timestamp_to_kafka)
return self
class FlinkKafkaProducer(FlinkKafkaProducerBase):
"""
Flink Sink to produce data into a Kafka topic. By
default producer will use AT_LEAST_ONCE semantic. Before using EXACTLY_ONCE please refer to
Flink's Kafka connector documentation.
"""
def __init__(self, topic: str, serialization_schema: SerializationSchema,
producer_config: Dict, kafka_producer_pool_size: int = 5,
semantic=Semantic.AT_LEAST_ONCE):
"""
Creates a FlinkKafkaProducer for a given topic. The sink produces a DataStream to the topic.
Using this constructor, the default FlinkFixedPartitioner will be used as the partitioner.
This default partitioner maps each sink subtask to a single Kafka partition (i.e. all
records received by a sink subtask will end up in the same Kafka partition).
:param topic: ID of the Kafka topic.
:param serialization_schema: User defined key-less serialization schema.
:param producer_config: Properties with the producer configuration.
"""
gateway = get_gateway()
j_properties = gateway.jvm.java.util.Properties()
for key, value in producer_config.items():
j_properties.setProperty(key, value)
JFlinkKafkaProducer = gateway.jvm \
.org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer
j_flink_kafka_producer = JFlinkKafkaProducer(
topic, serialization_schema._j_serialization_schema, j_properties, None,
semantic._to_j_semantic(), kafka_producer_pool_size)
super(FlinkKafkaProducer, self).__init__(j_flink_kafka_producer=j_flink_kafka_producer)
def ignore_failures_after_transaction_timeout(self) -> 'FlinkKafkaProducer':
"""
Disables the propagation of exceptions thrown when committing presumably timed out Kafka
transactions during recovery of the job. If a Kafka transaction is timed out, a commit will
never be successful. Hence, use this feature to avoid recovery loops of the Job. Exceptions
will still be logged to inform the user that data loss might have occurred.
Note that we use the System.currentTimeMillis() to track the age of a transaction. Moreover,
only exceptions thrown during the recovery are caught, i.e., the producer will attempt at
least one commit of the transaction before giving up.
:return: This FlinkKafkaProducer.
"""
self._j_function.ignoreFailuresAfterTransactionTimeout()
return self
# ---- KafkaSource ----
class KafkaSource(Source):
"""
The Source implementation of Kafka. Please use a :class:`KafkaSourceBuilder` to construct a
:class:`KafkaSource`. The following example shows how to create a KafkaSource emitting records
of String type.
::
>>> source = KafkaSource \\
... .builder() \\
... .set_bootstrap_servers('MY_BOOTSTRAP_SERVERS') \\
... .set_group_id('MY_GROUP') \\
... .set_topics('TOPIC1', 'TOPIC2') \\
... .set_value_only_deserializer(SimpleStringSchema()) \\
... .set_starting_offsets(KafkaOffsetsInitializer.earliest()) \\
... .build()
.. versionadded:: 1.16.0
"""
def __init__(self, j_kafka_source: JavaObject):
super().__init__(j_kafka_source)
@staticmethod
def builder() -> 'KafkaSourceBuilder':
"""
Get a kafkaSourceBuilder to build a :class:`KafkaSource`.
:return: a Kafka source builder.
"""
return KafkaSourceBuilder()
class KafkaSourceBuilder(object):
"""
The builder class for :class:`KafkaSource` to make it easier for the users to construct a
:class:`KafkaSource`.
The following example shows the minimum setup to create a KafkaSource that reads the String
values from a Kafka topic.
::
>>> source = KafkaSource.builder() \\
... .set_bootstrap_servers('MY_BOOTSTRAP_SERVERS') \\
... .set_topics('TOPIC1', 'TOPIC2') \\
... .set_value_only_deserializer(SimpleStringSchema()) \\
... .build()
The bootstrap servers, topics/partitions to consume, and the record deserializer are required
fields that must be set.
To specify the starting offsets of the KafkaSource, one can call :meth:`set_starting_offsets`.
By default, the KafkaSource runs in an CONTINUOUS_UNBOUNDED mode and never stops until the Flink
job is canceled or fails. To let the KafkaSource run in CONTINUOUS_UNBOUNDED but stops at some
given offsets, one can call :meth:`set_stopping_offsets`. For example the following KafkaSource
stops after it consumes up to the latest partition offsets at the point when the Flink started.
::
>>> source = KafkaSource.builder() \\
... .set_bootstrap_servers('MY_BOOTSTRAP_SERVERS') \\
... .set_topics('TOPIC1', 'TOPIC2') \\
... .set_value_only_deserializer(SimpleStringSchema()) \\
... .set_unbounded(KafkaOffsetsInitializer.latest()) \\
... .build()
.. versionadded:: 1.16.0
"""
def __init__(self):
self._j_builder = get_gateway().jvm.org.apache.flink.connector.kafka.source \
.KafkaSource.builder()
def build(self) -> 'KafkaSource':
return KafkaSource(self._j_builder.build())
def set_bootstrap_servers(self, bootstrap_servers: str) -> 'KafkaSourceBuilder':
"""
Sets the bootstrap servers for the KafkaConsumer of the KafkaSource.
:param bootstrap_servers: the bootstrap servers of the Kafka cluster.
:return: this KafkaSourceBuilder.
"""
self._j_builder.setBootstrapServers(bootstrap_servers)
return self
def set_group_id(self, group_id: str) -> 'KafkaSourceBuilder':
"""
Sets the consumer group id of the KafkaSource.
:param group_id: the group id of the KafkaSource.
:return: this KafkaSourceBuilder.
"""
self._j_builder.setGroupId(group_id)
return self
def set_topics(self, *topics: str) -> 'KafkaSourceBuilder':
"""
Set a list of topics the KafkaSource should consume from. All the topics in the list should
have existed in the Kafka cluster. Otherwise, an exception will be thrown. To allow some
topics to be created lazily, please use :meth:`set_topic_pattern` instead.
:param topics: the list of topics to consume from.
:return: this KafkaSourceBuilder.
"""
self._j_builder.setTopics(to_jarray(get_gateway().jvm.java.lang.String, topics))
return self
def set_topic_pattern(self, topic_pattern: str) -> 'KafkaSourceBuilder':
"""
Set a topic pattern to consume from use the java Pattern. For grammar, check out
`JavaDoc <https://docs.oracle.com/javase/8/docs/api/java/util/regex/Pattern.html>`_ .
:param topic_pattern: the pattern of the topic name to consume from.
:return: this KafkaSourceBuilder.
"""
self._j_builder.setTopicPattern(get_gateway().jvm.java.util.regex
.Pattern.compile(topic_pattern))
return self
def set_partitions(self, partitions: Set['KafkaTopicPartition']) -> 'KafkaSourceBuilder':
"""
Set a set of partitions to consume from.
Example:
::
>>> KafkaSource.builder().set_partitions({
... KafkaTopicPartition('TOPIC1', 0),
... KafkaTopicPartition('TOPIC1', 1),
... })
:param partitions: the set of partitions to consume from.
:return: this KafkaSourceBuilder.
"""
j_set = get_gateway().jvm.java.util.HashSet()
for tp in partitions:
j_set.add(tp._to_j_topic_partition())
self._j_builder.setPartitions(j_set)
return self
def set_starting_offsets(self, starting_offsets_initializer: 'KafkaOffsetsInitializer') \
-> 'KafkaSourceBuilder':
"""
Specify from which offsets the KafkaSource should start consume from by providing an
:class:`KafkaOffsetsInitializer`.
The following :class:`KafkaOffsetsInitializer` s are commonly used and provided out of the
box. Currently, customized offset initializer is not supported in PyFlink.
* :meth:`KafkaOffsetsInitializer.earliest` - starting from the earliest offsets. This is
also the default offset initializer of the KafkaSource for starting offsets.
* :meth:`KafkaOffsetsInitializer.latest` - starting from the latest offsets.
* :meth:`KafkaOffsetsInitializer.committedOffsets` - starting from the committed offsets of
the consumer group. If there is no committed offsets, starting from the offsets
specified by the :class:`KafkaOffsetResetStrategy`.
* :meth:`KafkaOffsetsInitializer.offsets` - starting from the specified offsets for each
partition.
* :meth:`KafkaOffsetsInitializer.timestamp` - starting from the specified timestamp for each
partition. Note that the guarantee here is that all the records in Kafka whose timestamp
is greater than the given starting timestamp will be consumed. However, it is possible
that some consumer records whose timestamp is smaller than the given starting timestamp
are also consumed.
:param starting_offsets_initializer: the :class:`KafkaOffsetsInitializer` setting the
starting offsets for the Source.
:return: this KafkaSourceBuilder.
"""
self._j_builder.setStartingOffsets(starting_offsets_initializer._j_initializer)
return self
def set_unbounded(self, stopping_offsets_initializer: 'KafkaOffsetsInitializer') \
-> 'KafkaSourceBuilder':
"""
By default, the KafkaSource is set to run in CONTINUOUS_UNBOUNDED manner and thus never
stops until the Flink job fails or is canceled. To let the KafkaSource run as a streaming
source but still stops at some point, one can set an :class:`KafkaOffsetsInitializer`
to specify the stopping offsets for each partition. When all the partitions have reached
their stopping offsets, the KafkaSource will then exit.
This method is different from :meth:`set_bounded` that after setting the stopping offsets
with this method, KafkaSource will still be CONTINUOUS_UNBOUNDED even though it will stop at
the stopping offsets specified by the stopping offset initializer.
The following :class:`KafkaOffsetsInitializer` s are commonly used and provided out of the
box. Currently, customized offset initializer is not supported in PyFlink.
* :meth:`KafkaOffsetsInitializer.latest` - starting from the latest offsets.
* :meth:`KafkaOffsetsInitializer.committedOffsets` - starting from the committed offsets of
the consumer group. If there is no committed offsets, starting from the offsets
specified by the :class:`KafkaOffsetResetStrategy`.
* :meth:`KafkaOffsetsInitializer.offsets` - starting from the specified offsets for each
partition.
* :meth:`KafkaOffsetsInitializer.timestamp` - starting from the specified timestamp for each
partition. Note that the guarantee here is that all the records in Kafka whose timestamp
is greater than the given starting timestamp will be consumed. However, it is possible
that some consumer records whose timestamp is smaller than the given starting timestamp
are also consumed.
:param stopping_offsets_initializer: the :class:`KafkaOffsetsInitializer` to specify the
stopping offsets.
:return: this KafkaSourceBuilder
"""
self._j_builder.setUnbounded(stopping_offsets_initializer._j_initializer)
return self
def set_bounded(self, stopping_offsets_initializer: 'KafkaOffsetsInitializer') \
-> 'KafkaSourceBuilder':
"""
By default, the KafkaSource is set to run in CONTINUOUS_UNBOUNDED manner and thus never
stops until the Flink job fails or is canceled. To let the KafkaSource run in BOUNDED manner
and stop at some point, one can set an :class:`KafkaOffsetsInitializer` to specify the
stopping offsets for each partition. When all the partitions have reached their stopping
offsets, the KafkaSource will then exit.
This method is different from :meth:`set_unbounded` that after setting the stopping offsets
with this method, :meth:`KafkaSource.get_boundedness` will return BOUNDED instead of
CONTINUOUS_UNBOUNDED.
The following :class:`KafkaOffsetsInitializer` s are commonly used and provided out of the
box. Currently, customized offset initializer is not supported in PyFlink.
* :meth:`KafkaOffsetsInitializer.latest` - starting from the latest offsets.
* :meth:`KafkaOffsetsInitializer.committedOffsets` - starting from the committed offsets of
the consumer group. If there is no committed offsets, starting from the offsets
specified by the :class:`KafkaOffsetResetStrategy`.
* :meth:`KafkaOffsetsInitializer.offsets` - starting from the specified offsets for each
partition.
* :meth:`KafkaOffsetsInitializer.timestamp` - starting from the specified timestamp for each
partition. Note that the guarantee here is that all the records in Kafka whose timestamp
is greater than the given starting timestamp will be consumed. However, it is possible
that some consumer records whose timestamp is smaller than the given starting timestamp
are also consumed.
:param stopping_offsets_initializer: the :class:`KafkaOffsetsInitializer` to specify the
stopping offsets.
:return: this KafkaSourceBuilder
"""
self._j_builder.setBounded(stopping_offsets_initializer._j_initializer)
return self
def set_value_only_deserializer(self, deserialization_schema: DeserializationSchema) \
-> 'KafkaSourceBuilder':
"""
Sets the :class:`~pyflink.common.serialization.DeserializationSchema` for deserializing the
value of Kafka's ConsumerRecord. The other information (e.g. key) in a ConsumerRecord will
be ignored.
:param deserialization_schema: the :class:`DeserializationSchema` to use for
deserialization.
:return: this KafkaSourceBuilder.
"""
self._j_builder.setValueOnlyDeserializer(deserialization_schema._j_deserialization_schema)
return self
def set_client_id_prefix(self, prefix: str) -> 'KafkaSourceBuilder':
"""
Sets the client id prefix of this KafkaSource.
:param prefix: the client id prefix to use for this KafkaSource.
:return: this KafkaSourceBuilder.
"""
self._j_builder.setClientIdPrefix(prefix)
return self
def set_property(self, key: str, value: str) -> 'KafkaSourceBuilder':
"""
Set an arbitrary property for the KafkaSource and KafkaConsumer. The valid keys can be found
in ConsumerConfig and KafkaSourceOptions.
Note that the following keys will be overridden by the builder when the KafkaSource is
created.
* ``key.deserializer`` is always set to ByteArrayDeserializer.
* ``value.deserializer`` is always set to ByteArrayDeserializer.
* ``auto.offset.reset.strategy`` is overridden by AutoOffsetResetStrategy returned by
:class:`KafkaOffsetsInitializer` for the starting offsets, which is by default
:meth:`KafkaOffsetsInitializer.earliest`.
* ``partition.discovery.interval.ms`` is overridden to -1 when :meth:`set_bounded` has been
invoked.
:param key: the key of the property.
:param value: the value of the property.
:return: this KafkaSourceBuilder.
"""
self._j_builder.setProperty(key, value)
return self
def set_properties(self, props: Dict) -> 'KafkaSourceBuilder':
"""
Set arbitrary properties for the KafkaSource and KafkaConsumer. The valid keys can be found
in ConsumerConfig and KafkaSourceOptions.
Note that the following keys will be overridden by the builder when the KafkaSource is
created.
* ``key.deserializer`` is always set to ByteArrayDeserializer.
* ``value.deserializer`` is always set to ByteArrayDeserializer.
* ``auto.offset.reset.strategy`` is overridden by AutoOffsetResetStrategy returned by
:class:`KafkaOffsetsInitializer` for the starting offsets, which is by default
:meth:`KafkaOffsetsInitializer.earliest`.
* ``partition.discovery.interval.ms`` is overridden to -1 when :meth:`set_bounded` has been
invoked.
* ``client.id`` is overridden to "client.id.prefix-RANDOM_LONG", or "group.id-RANDOM_LONG"
if the client id prefix is not set.
:param props: the properties to set for the KafkaSource.
:return: this KafkaSourceBuilder.
"""
gateway = get_gateway()
j_properties = gateway.jvm.java.util.Properties()
for key, value in props.items():
j_properties.setProperty(key, value)
self._j_builder.setProperties(j_properties)
return self
class KafkaTopicPartition(object):
"""
Corresponding to Java ``org.apache.kafka.common.TopicPartition`` class.
Example:
::
>>> topic_partition = KafkaTopicPartition('TOPIC1', 0)
.. versionadded:: 1.16.0
"""
def __init__(self, topic: str, partition: int):
self._topic = topic
self._partition = partition
def _to_j_topic_partition(self):
jvm = get_gateway().jvm
return jvm.org.apache.flink.kafka.shaded.org.apache.kafka.common.TopicPartition(
self._topic, self._partition)
def __eq__(self, other):
if not isinstance(other, KafkaTopicPartition):
return False
return self._topic == other._topic and self._partition == other._partition
def __hash__(self):
return 31 * (31 + self._partition) + hash(self._topic)
class KafkaOffsetResetStrategy(Enum):
"""
Corresponding to Java ``org.apache.kafka.client.consumer.OffsetResetStrategy`` class.
.. versionadded:: 1.16.0
"""
LATEST = 0
EARLIEST = 1
NONE = 2
def _to_j_offset_reset_strategy(self):
JOffsetResetStrategy = get_gateway().jvm.org.apache.flink.kafka.shaded.org.apache.kafka.\
clients.consumer.OffsetResetStrategy
return getattr(JOffsetResetStrategy, self.name)
class KafkaOffsetsInitializer(object):
"""
An interface for users to specify the starting / stopping offset of a KafkaPartitionSplit.
.. versionadded:: 1.16.0
"""
def __init__(self, j_initializer: JavaObject):
self._j_initializer = j_initializer
@staticmethod
def committed_offsets(
offset_reset_strategy: 'KafkaOffsetResetStrategy' = KafkaOffsetResetStrategy.NONE) -> \
'KafkaOffsetsInitializer':
"""
Get an :class:`KafkaOffsetsInitializer` which initializes the offsets to the committed
offsets. An exception will be thrown at runtime if there is no committed offsets.
An optional :class:`KafkaOffsetResetStrategy` can be specified to initialize the offsets if
the committed offsets does not exist.
:param offset_reset_strategy: the offset reset strategy to use when the committed offsets do
not exist.
:return: an offset initializer which initialize the offsets to the committed offsets.
"""
JOffsetsInitializer = get_gateway().jvm.org.apache.flink.connector.kafka.source.\
enumerator.initializer.OffsetsInitializer
return KafkaOffsetsInitializer(JOffsetsInitializer.committedOffsets(
offset_reset_strategy._to_j_offset_reset_strategy()))
@staticmethod
def timestamp(timestamp: int) -> 'KafkaOffsetsInitializer':
"""
Get an :class:`KafkaOffsetsInitializer` which initializes the offsets in each partition so
that the initialized offset is the offset of the first record whose record timestamp is
greater than or equals the give timestamp.
:param timestamp: the timestamp to start the consumption.
:return: an :class:`OffsetsInitializer` which initializes the offsets based on the given
timestamp.
"""
JOffsetsInitializer = get_gateway().jvm.org.apache.flink.connector.kafka.source. \
enumerator.initializer.OffsetsInitializer
return KafkaOffsetsInitializer(JOffsetsInitializer.timestamp(timestamp))
@staticmethod
def earliest() -> 'KafkaOffsetsInitializer':
"""
Get an :class:`KafkaOffsetsInitializer` which initializes the offsets to the earliest
available offsets of each partition.
:return: an :class:`KafkaOffsetsInitializer` which initializes the offsets to the earliest
available offsets.
"""
JOffsetsInitializer = get_gateway().jvm.org.apache.flink.connector.kafka.source. \
enumerator.initializer.OffsetsInitializer
return KafkaOffsetsInitializer(JOffsetsInitializer.earliest())
@staticmethod
def latest() -> 'KafkaOffsetsInitializer':
"""
Get an :class:`KafkaOffsetsInitializer` which initializes the offsets to the latest offsets
of each partition.
:return: an :class:`KafkaOffsetsInitializer` which initializes the offsets to the latest
offsets.
"""
JOffsetsInitializer = get_gateway().jvm.org.apache.flink.connector.kafka.source. \
enumerator.initializer.OffsetsInitializer
return KafkaOffsetsInitializer(JOffsetsInitializer.latest())
@staticmethod
def offsets(offsets: Dict['KafkaTopicPartition', int],
offset_reset_strategy: 'KafkaOffsetResetStrategy' =
KafkaOffsetResetStrategy.EARLIEST) -> 'KafkaOffsetsInitializer':
"""
Get an :class:`KafkaOffsetsInitializer` which initializes the offsets to the specified
offsets.
An optional :class:`KafkaOffsetResetStrategy` can be specified to initialize the offsets in
case the specified offset is out of range.
Example:
::
>>> KafkaOffsetsInitializer.offsets({
... KafkaTopicPartition('TOPIC1', 0): 0,
... KafkaTopicPartition('TOPIC1', 1): 10000
... }, KafkaOffsetResetStrategy.EARLIEST)
:param offsets: the specified offsets for each partition.
:param offset_reset_strategy: the :class:`KafkaOffsetResetStrategy` to use when the
specified offset is out of range.
:return: an :class:`KafkaOffsetsInitializer` which initializes the offsets to the specified
offsets.
"""
jvm = get_gateway().jvm
j_map_wrapper = jvm.org.apache.flink.python.util.HashMapWrapper(
None, get_java_class(jvm.Long))
for tp, offset in offsets.items():
j_map_wrapper.put(tp._to_j_topic_partition(), offset)
JOffsetsInitializer = get_gateway().jvm.org.apache.flink.connector.kafka.source. \
enumerator.initializer.OffsetsInitializer
return KafkaOffsetsInitializer(JOffsetsInitializer.offsets(
j_map_wrapper.asMap(), offset_reset_strategy._to_j_offset_reset_strategy()))
class KafkaSink(Sink, SupportsPreprocessing):
"""
Flink Sink to produce data into a Kafka topic. The sink supports all delivery guarantees
described by :class:`DeliveryGuarantee`.
* :attr:`DeliveryGuarantee.NONE` does not provide any guarantees: messages may be lost in case
of issues on the Kafka broker and messages may be duplicated in case of a Flink failure.
* :attr:`DeliveryGuarantee.AT_LEAST_ONCE` the sink will wait for all outstanding records in the
Kafka buffers to be acknowledged by the Kafka producer on a checkpoint. No messages will be
lost in case of any issue with the Kafka brokers but messages may be duplicated when Flink
restarts.
* :attr:`DeliveryGuarantee.EXACTLY_ONCE`: In this mode the KafkaSink will write all messages in
a Kafka transaction that will be committed to Kafka on a checkpoint. Thus, if the consumer
reads only committed data (see Kafka consumer config ``isolation.level``), no duplicates
will be seen in case of a Flink restart. However, this delays record writing effectively
until a checkpoint is written, so adjust the checkpoint duration accordingly. Please ensure
that you use unique transactional id prefixes across your applications running on the same
Kafka cluster such that multiple running jobs do not interfere in their transactions!
Additionally, it is highly recommended to tweak Kafka transaction timeout (link) >> maximum
checkpoint duration + maximum restart duration or data loss may happen when Kafka expires an
uncommitted transaction.
.. versionadded:: 1.16.0
"""
def __init__(self, j_kafka_sink, transformer: Optional[StreamTransformer] = None):
super().__init__(j_kafka_sink)
self._transformer = transformer
@staticmethod
def builder() -> 'KafkaSinkBuilder':
"""
Create a :class:`KafkaSinkBuilder` to construct :class:`KafkaSink`.
"""
return KafkaSinkBuilder()
def get_transformer(self) -> Optional[StreamTransformer]:
return self._transformer
class KafkaSinkBuilder(object):
"""
Builder to construct :class:`KafkaSink`.
The following example shows the minimum setup to create a KafkaSink that writes String values
to a Kafka topic.
::
>>> record_serializer = KafkaRecordSerializationSchema.builder() \\
... .set_topic(MY_SINK_TOPIC) \\
... .set_value_serialization_schema(SimpleStringSchema()) \\
... .build()
>>> sink = KafkaSink.builder() \\
... .set_bootstrap_servers(MY_BOOTSTRAP_SERVERS) \\
... .set_record_serializer(record_serializer) \\
... .build()
One can also configure different :class:`DeliveryGuarantee` by using
:meth:`set_delivery_guarantee` but keep in mind when using
:attr:`DeliveryGuarantee.EXACTLY_ONCE`, one must set the transactional id prefix
:meth:`set_transactional_id_prefix`.
.. versionadded:: 1.16.0
"""
def __init__(self):
jvm = get_gateway().jvm
self._j_builder = jvm.org.apache.flink.connector.kafka.sink.KafkaSink.builder()
self._preprocessing = None
def build(self) -> 'KafkaSink':
"""
Constructs the :class:`KafkaSink` with the configured properties.
"""
return KafkaSink(self._j_builder.build(), self._preprocessing)
def set_bootstrap_servers(self, bootstrap_servers: str) -> 'KafkaSinkBuilder':
"""
Sets the Kafka bootstrap servers.
:param bootstrap_servers: A comma separated list of valid URIs to reach the Kafka broker.
"""
self._j_builder.setBootstrapServers(bootstrap_servers)
return self
def set_delivery_guarantee(self, delivery_guarantee: DeliveryGuarantee) -> 'KafkaSinkBuilder':
"""
Sets the wanted :class:`DeliveryGuarantee`. The default delivery guarantee is
:attr:`DeliveryGuarantee.NONE`.
:param delivery_guarantee: The wanted :class:`DeliveryGuarantee`.
"""
self._j_builder.setDeliveryGuarantee(delivery_guarantee._to_j_delivery_guarantee())
return self
def set_transactional_id_prefix(self, transactional_id_prefix: str) -> 'KafkaSinkBuilder':
"""
Sets the prefix for all created transactionalIds if :attr:`DeliveryGuarantee.EXACTLY_ONCE`
is configured.
It is mandatory to always set this value with :attr:`DeliveryGuarantee.EXACTLY_ONCE` to
prevent corrupted transactions if multiple jobs using the KafkaSink run against the same
Kafka Cluster. The default prefix is ``"kafka-sink"``.
The size of the prefix is capped by MAXIMUM_PREFIX_BYTES (6400) formatted with UTF-8.
It is important to keep the prefix stable across application restarts. If the prefix changes
it might happen that lingering transactions are not correctly aborted and newly written
messages are not immediately consumable until transactions timeout.
:param transactional_id_prefix: The transactional id prefix.
"""
self._j_builder.setTransactionalIdPrefix(transactional_id_prefix)
return self
def set_record_serializer(self, record_serializer: 'KafkaRecordSerializationSchema') \
-> 'KafkaSinkBuilder':
"""
Sets the :class:`KafkaRecordSerializationSchema` that transforms incoming records to kafka
producer records.
:param record_serializer: The :class:`KafkaRecordSerializationSchema`.
"""
# NOTE: If topic selector is a generated first-column selector, do extra preprocessing
j_topic_selector = get_field_value(record_serializer._j_serialization_schema,
'topicSelector')
if (
j_topic_selector.getClass().getCanonicalName() ==
'org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchemaBuilder.'
'CachingTopicSelector'
) and (
get_field_value(j_topic_selector, 'topicSelector').getClass().getCanonicalName()
is not None and
(get_field_value(j_topic_selector, 'topicSelector').getClass().getCanonicalName()
.startswith('com.sun.proxy') or
get_field_value(j_topic_selector, 'topicSelector').getClass().getCanonicalName()
.startswith('jdk.proxy'))
):
record_serializer._wrap_serialization_schema()
self._preprocessing = record_serializer._build_preprocessing()
self._j_builder.setRecordSerializer(record_serializer._j_serialization_schema)
return self
def set_property(self, key: str, value: str) -> 'KafkaSinkBuilder':
"""
Sets kafka producer config.
:param key: Kafka producer config key.
:param value: Kafka producer config value.
"""
self._j_builder.setProperty(key, value)
return self
class KafkaRecordSerializationSchema(SerializationSchema):
"""
A serialization schema which defines how to convert the stream record to kafka producer record.
.. versionadded:: 1.16.0
"""
def __init__(self, j_serialization_schema,
topic_selector: Optional['KafkaTopicSelector'] = None):
super().__init__(j_serialization_schema)
self._topic_selector = topic_selector
@staticmethod
def builder() -> 'KafkaRecordSerializationSchemaBuilder':
"""
Creates a default schema builder to provide common building blocks i.e. key serialization,
value serialization, topic selection.
"""
return KafkaRecordSerializationSchemaBuilder()
def _wrap_serialization_schema(self):
jvm = get_gateway().jvm
def _wrap_schema(field_name):
j_schema_field = get_field(self._j_serialization_schema.getClass(), field_name)
if j_schema_field.get(self._j_serialization_schema) is not None:
j_schema_field.set(
self._j_serialization_schema,
jvm.org.apache.flink.python.util.PythonConnectorUtils
.SecondColumnSerializationSchema(
j_schema_field.get(self._j_serialization_schema)
)
)
_wrap_schema('keySerializationSchema')
_wrap_schema('valueSerializationSchema')
def _build_preprocessing(self) -> StreamTransformer:
class SelectTopicTransformer(StreamTransformer):
def __init__(self, topic_selector: KafkaTopicSelector):
self._topic_selector = topic_selector
def apply(self, ds):
output_type = Types.ROW([Types.STRING(), ds.get_type()])
return ds.map(lambda v: Row(self._topic_selector.apply(v), v),
output_type=output_type)
return SelectTopicTransformer(self._topic_selector)
class KafkaRecordSerializationSchemaBuilder(object):
"""
Builder to construct :class:`KafkaRecordSerializationSchema`.
Example:
::
>>> KafkaRecordSerializationSchema.builder() \\
... .set_topic('topic') \\
... .set_key_serialization_schema(SimpleStringSchema()) \\
... .set_value_serialization_schema(SimpleStringSchema()) \\
... .build()
And the sink topic can be calculated dynamically from each record:
::
>>> KafkaRecordSerializationSchema.builder() \\
... .set_topic_selector(lambda row: 'topic-' + row['category']) \\
... .set_value_serialization_schema(
... JsonRowSerializationSchema.builder().with_type_info(ROW_TYPE).build()) \\
... .build()
It is necessary to configure exactly one serialization method for the value and a topic.
.. versionadded:: 1.16.0
"""
def __init__(self):
jvm = get_gateway().jvm
self._j_builder = jvm.org.apache.flink.connector.kafka.sink \
.KafkaRecordSerializationSchemaBuilder()
self._fixed_topic = True # type: bool
self._topic_selector = None # type: Optional[KafkaTopicSelector]
self._key_serialization_schema = None # type: Optional[SerializationSchema]
self._value_serialization_schema = None # type: Optional[SerializationSchema]
def build(self) -> 'KafkaRecordSerializationSchema':
"""
Constructs the :class:`KafkaRecordSerializationSchemaBuilder` with the configured
properties.
"""
if self._fixed_topic:
return KafkaRecordSerializationSchema(self._j_builder.build())
else:
return KafkaRecordSerializationSchema(self._j_builder.build(), self._topic_selector)
def set_topic(self, topic: str) -> 'KafkaRecordSerializationSchemaBuilder':
"""
Sets a fixed topic which used as destination for all records.
:param topic: The fixed topic.
"""
self._j_builder.setTopic(topic)
self._fixed_topic = True
return self
def set_topic_selector(self, topic_selector: Union[Callable[[Any], str], 'KafkaTopicSelector'])\
-> 'KafkaRecordSerializationSchemaBuilder':
"""
Sets a topic selector which computes the target topic for every incoming record.
:param topic_selector: A :class:`KafkaTopicSelector` implementation or a function that
consumes each incoming record and return the topic string.
"""
if not isinstance(topic_selector, KafkaTopicSelector) and not callable(topic_selector):
raise TypeError('topic_selector must be KafkaTopicSelector or a callable')
if not isinstance(topic_selector, KafkaTopicSelector):
class TopicSelectorFunctionAdapter(KafkaTopicSelector):
def __init__(self, f: Callable[[Any], str]):
self._f = f
def apply(self, data) -> str:
return self._f(data)
topic_selector = TopicSelectorFunctionAdapter(topic_selector)
jvm = get_gateway().jvm
self._j_builder.setTopicSelector(
jvm.org.apache.flink.python.util.PythonConnectorUtils.createFirstColumnTopicSelector(
get_java_class(jvm.org.apache.flink.connector.kafka.sink.TopicSelector)
)
)
self._fixed_topic = False
self._topic_selector = topic_selector
return self
def set_key_serialization_schema(self, key_serialization_schema: SerializationSchema) \
-> 'KafkaRecordSerializationSchemaBuilder':
"""
Sets a :class:`SerializationSchema` which is used to serialize the incoming element to the
key of the producer record. The key serialization is optional, if not set, the key of the
producer record will be null.
:param key_serialization_schema: The :class:`SerializationSchema` to serialize each incoming
record as the key of producer record.
"""
self._key_serialization_schema = key_serialization_schema
self._j_builder.setKeySerializationSchema(key_serialization_schema._j_serialization_schema)
return self
def set_value_serialization_schema(self, value_serialization_schema: SerializationSchema) \
-> 'KafkaRecordSerializationSchemaBuilder':
"""
Sets a :class:`SerializationSchema` which is used to serialize the incoming element to the
value of the producer record. The value serialization is required.
:param value_serialization_schema: The :class:`SerializationSchema` to serialize each data
record as the key of producer record.
"""
self._value_serialization_schema = value_serialization_schema
self._j_builder.setValueSerializationSchema(
value_serialization_schema._j_serialization_schema)
return self
class KafkaTopicSelector(ABC):
"""
Select topic for an incoming record
.. versionadded:: 1.16.0
"""
@abstractmethod
def apply(self, data) -> str:
pass
| 51,643 | 43.367698 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/pulsar.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import warnings
from enum import Enum
from typing import Dict, Union, List, Optional
from pyflink.common import DeserializationSchema, ConfigOptions, Duration, SerializationSchema, \
ConfigOption
from pyflink.datastream.connectors import Source, Sink, DeliveryGuarantee
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import load_java_class
__all__ = [
'PulsarSource',
'PulsarSourceBuilder',
'StartCursor',
'StopCursor',
'RangeGenerator',
'PulsarSink',
'PulsarSinkBuilder',
'MessageDelayer',
'TopicRoutingMode'
]
# ---- PulsarSource ----
class StartCursor(object):
"""
A factory class for users to specify the start position of a pulsar subscription.
Since it would be serialized into split.
The implementation for this interface should be well considered.
I don't recommend adding extra internal state for this implementation.
This class would be used only for SubscriptionType.Exclusive and SubscriptionType.Failover.
"""
def __init__(self, _j_start_cursor):
self._j_start_cursor = _j_start_cursor
@staticmethod
def default_start_cursor() -> 'StartCursor':
return StartCursor.earliest()
@staticmethod
def earliest() -> 'StartCursor':
JStartCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor
return StartCursor(JStartCursor.earliest())
@staticmethod
def latest() -> 'StartCursor':
JStartCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor
return StartCursor(JStartCursor.latest())
@staticmethod
def from_message_id(message_id: bytes, inclusive: bool = True) -> 'StartCursor':
"""
Find the available message id and start consuming from it. User could call pulsar Python
library serialize method to cover messageId bytes.
Example:
::
>>> from pulsar import MessageId
>>> message_id_bytes = MessageId().serialize()
>>> start_cursor = StartCursor.from_message_id(message_id_bytes)
"""
JStartCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor
j_message_id = get_gateway().jvm.org.apache.pulsar.client.api.MessageId \
.fromByteArray(message_id)
return StartCursor(JStartCursor.fromMessageId(j_message_id, inclusive))
@staticmethod
def from_publish_time(timestamp: int) -> 'StartCursor':
"""
Seek the start position by using message publish time.
"""
JStartCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StartCursor
return StartCursor(JStartCursor.fromPublishTime(timestamp))
class StopCursor(object):
"""
A factory class for users to specify the stop position of a pulsar subscription. Since it would
be serialized into split. The implementation for this interface should be well considered. I
don't recommend adding extra internal state for this implementation.
"""
def __init__(self, _j_stop_cursor):
self._j_stop_cursor = _j_stop_cursor
@staticmethod
def default_stop_cursor() -> 'StopCursor':
return StopCursor.never()
@staticmethod
def never() -> 'StopCursor':
JStopCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor
return StopCursor(JStopCursor.never())
@staticmethod
def latest() -> 'StopCursor':
JStopCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor
return StopCursor(JStopCursor.latest())
@staticmethod
def at_event_time(timestamp: int) -> 'StopCursor':
"""
Stop consuming when message eventTime is greater than or equals the specified timestamp.
"""
JStopCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor
return StopCursor(JStopCursor.atEventTime(timestamp))
@staticmethod
def after_event_time(timestamp: int) -> 'StopCursor':
"""
Stop consuming when message eventTime is greater than the specified timestamp.
"""
JStopCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor
return StopCursor(JStopCursor.afterEventTime(timestamp))
@staticmethod
def at_publish_time(timestamp: int) -> 'StopCursor':
"""
Stop consuming when message publishTime is greater than or equals the specified timestamp.
"""
JStopCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor
return StopCursor(JStopCursor.atPublishTime(timestamp))
@staticmethod
def after_publish_time(timestamp: int) -> 'StopCursor':
"""
Stop consuming when message publishTime is greater than the specified timestamp.
"""
JStopCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor
return StopCursor(JStopCursor.afterPublishTime(timestamp))
@staticmethod
def at_message_id(message_id: bytes) -> 'StopCursor':
"""
Stop consuming when the messageId is equal or greater than the specified messageId.
Message that is equal to the specified messageId will not be consumed. User could call
pulsar Python library serialize method to cover messageId bytes.
Example:
::
>>> from pulsar import MessageId
>>> message_id_bytes = MessageId().serialize()
>>> stop_cursor = StopCursor.at_message_id(message_id_bytes)
"""
JStopCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor
j_message_id = get_gateway().jvm.org.apache.pulsar.client.api.MessageId \
.fromByteArray(message_id)
return StopCursor(JStopCursor.atMessageId(j_message_id))
@staticmethod
def after_message_id(message_id: bytes) -> 'StopCursor':
"""
Stop consuming when the messageId is greater than the specified messageId. Message that is
equal to the specified messageId will be consumed. User could call pulsar Python library
serialize method to cover messageId bytes.
Example:
::
>>> from pulsar import MessageId
>>> message_id_bytes = MessageId().serialize()
>>> stop_cursor = StopCursor.after_message_id(message_id_bytes)
"""
JStopCursor = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.cursor.StopCursor
j_message_id = get_gateway().jvm.org.apache.pulsar.client.api.MessageId \
.fromByteArray(message_id)
return StopCursor(JStopCursor.afterMessageId(j_message_id))
class RangeGenerator(object):
"""
A generator for generating the TopicRange for given topic. It was used for pulsar's
SubscriptionType#Key_Shared mode. TopicRange would be used in KeySharedPolicy for different
pulsar source readers.
If you implement this interface, make sure that each TopicRange would be assigned to a
specified source reader. Since flink parallelism is provided, make sure the pulsar message key's
hashcode is evenly distributed among these topic ranges.
"""
def __init__(self, j_range_generator):
self._j_range_generator = j_range_generator
@staticmethod
def full() -> 'RangeGenerator':
"""
Default implementation for SubscriptionType#Shared, SubscriptionType#Failover and
SubscriptionType#Exclusive subscription.
"""
JFullRangeGenerator = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.topic.range.FullRangeGenerator
return RangeGenerator(JFullRangeGenerator())
@staticmethod
def fixed_key(support_null_key: bool = False,
keys: Optional[Union[str, List[str]]] = None,
key_bytes: Optional[bytes] = None,
ordering_key_bytes: Optional[bytes] = None) -> 'RangeGenerator':
"""
Pulsar didn't expose the key hash range method. We have to provide an implementation for
end-user. You can add the keys you want to consume, no need to provide any hash ranges.
Since the key's hash isn't specified to only one key. The consuming results may contain the
messages with different keys comparing the keys you have defined in this range generator.
Remember to use flink's DataStream.filter() method.
:param support_null_key: Some Message in Pulsar may not have Message#getOrderingKey() or
Message#getKey(), use this method for supporting consuming such
messages.
:param keys: If you set the message key by using PulsarMessageBuilder#key(String) or
TypedMessageBuilder#key(String), use this method for supporting consuming such
messages.
:param key_bytes: If you set the message key by using TypedMessageBuilder#keyBytes(byte[]),
use this method for supporting consuming such messages.
:param ordering_key_bytes: Pulsar's ordering key is prior to the message key. If you set
the ordering key by using
PulsarMessageBuilder#orderingKey(byte[]) or
TypedMessageBuilder#orderingKey(byte[]), use this method for
supporting consuming such messages.
* messages.
"""
JFixedKeysRangeGenerator = get_gateway().jvm \
.org.apache.flink.connector.pulsar.source.enumerator.topic.range.FixedKeysRangeGenerator
j_range_generator_builder = JFixedKeysRangeGenerator.builder()
if support_null_key:
j_range_generator_builder.supportNullKey()
if keys is not None:
if isinstance(keys, str):
j_range_generator_builder.key(keys)
else:
for key in keys:
j_range_generator_builder.key(key)
if key_bytes is not None:
j_range_generator_builder.keyBytes(key_bytes)
if ordering_key_bytes is not None:
j_range_generator_builder.orderingKey(ordering_key_bytes)
return RangeGenerator(j_range_generator_builder.build())
class PulsarSource(Source):
"""
The Source implementation of Pulsar. Please use a PulsarSourceBuilder to construct a
PulsarSource. The following example shows how to create a PulsarSource emitting records of
String type.
Example:
::
>>> source = PulsarSource() \\
... .builder() \\
... .set_topics([TOPIC1, TOPIC2]) \\
... .set_service_url(get_service_url()) \\
... .set_admin_url(get_admin_url()) \\
... .set_subscription_name("test") \\
... .set_deserialization_schema(SimpleStringSchema()) \\
... .set_bounded_stop_cursor(StopCursor.default_stop_cursor()) \\
... .build()
See PulsarSourceBuilder for more details.
"""
def __init__(self, j_pulsar_source):
super(PulsarSource, self).__init__(source=j_pulsar_source)
@staticmethod
def builder() -> 'PulsarSourceBuilder':
"""
Get a PulsarSourceBuilder to builder a PulsarSource.
"""
return PulsarSourceBuilder()
class PulsarSourceBuilder(object):
"""
The builder class for PulsarSource to make it easier for the users to construct a PulsarSource.
The following example shows the minimum setup to create a PulsarSource that reads the String
values from a Pulsar topic.
Example:
::
>>> source = PulsarSource() \\
... .builder() \\
... .set_service_url(PULSAR_BROKER_URL) \\
... .set_admin_url(PULSAR_BROKER_HTTP_URL) \\
... .set_subscription_name("flink-source-1") \\
... .set_topics([TOPIC1, TOPIC2]) \\
... .set_deserialization_schema(SimpleStringSchema()) \\
... .build()
The service url, admin url, subscription name, topics to consume, and the record deserializer
are required fields that must be set.
To specify the starting position of PulsarSource, one can call set_start_cursor(StartCursor).
By default the PulsarSource runs in an Boundedness.CONTINUOUS_UNBOUNDED mode and never stop
until the Flink job is canceled or fails. To let the PulsarSource run in
Boundedness.CONTINUOUS_UNBOUNDED but stops at some given offsets, one can call
set_unbounded_stop_cursor(StopCursor).
For example the following PulsarSource stops after it consumes up to a event time when the
Flink started.
Example:
::
>>> source = PulsarSource() \\
... .builder() \\
... .set_service_url(PULSAR_BROKER_URL) \\
... .set_admin_url(PULSAR_BROKER_HTTP_URL) \\
... .set_subscription_name("flink-source-1") \\
... .set_topics([TOPIC1, TOPIC2]) \\
... .set_deserialization_schema(SimpleStringSchema()) \\
... .set_bounded_stop_cursor(StopCursor.at_publish_time(int(time.time() * 1000)))
... .build()
"""
def __init__(self):
JPulsarSource = \
get_gateway().jvm.org.apache.flink.connector.pulsar.source.PulsarSource
self._j_pulsar_source_builder = JPulsarSource.builder()
def set_admin_url(self, admin_url: str) -> 'PulsarSourceBuilder':
"""
Sets the admin endpoint for the PulsarAdmin of the PulsarSource.
"""
self._j_pulsar_source_builder.setAdminUrl(admin_url)
return self
def set_service_url(self, service_url: str) -> 'PulsarSourceBuilder':
"""
Sets the server's link for the PulsarConsumer of the PulsarSource.
"""
self._j_pulsar_source_builder.setServiceUrl(service_url)
return self
def set_subscription_name(self, subscription_name: str) -> 'PulsarSourceBuilder':
"""
Sets the name for this pulsar subscription.
"""
self._j_pulsar_source_builder.setSubscriptionName(subscription_name)
return self
def set_topics(self, topics: Union[str, List[str]]) -> 'PulsarSourceBuilder':
"""
Set a pulsar topic list for flink source. Some topic may not exist currently, consuming this
non-existed topic wouldn't throw any exception. But the best solution is just consuming by
using a topic regex. You can set topics once either with setTopics or setTopicPattern in
this builder.
"""
if not isinstance(topics, list):
topics = [topics]
self._j_pulsar_source_builder.setTopics(topics)
return self
def set_topic_pattern(self, topic_pattern: str) -> 'PulsarSourceBuilder':
"""
Set a topic pattern to consume from the java regex str. You can set topics once either with
set_topics or set_topic_pattern in this builder.
"""
self._j_pulsar_source_builder.setTopicPattern(topic_pattern)
return self
def set_consumer_name(self, consumer_name: str) -> 'PulsarSourceBuilder':
"""
The consumer name is informative, and it can be used to identify a particular consumer
instance from the topic stats.
.. versionadded:: 1.17.2
"""
self._j_pulsar_source_builder.setConsumerName(consumer_name)
return self
def set_range_generator(self, range_generator: RangeGenerator) -> 'PulsarSourceBuilder':
"""
Set a topic range generator for consuming a sub set of keys.
:param range_generator: A generator which would generate a set of TopicRange for given
topic.
.. versionadded:: 1.17.2
"""
self._j_pulsar_source_builder.setRangeGenerator(range_generator._j_range_generator)
return self
def set_start_cursor(self, start_cursor: StartCursor) -> 'PulsarSourceBuilder':
"""
Specify from which offsets the PulsarSource should start consume from by providing an
StartCursor.
"""
self._j_pulsar_source_builder.setStartCursor(start_cursor._j_start_cursor)
return self
def set_unbounded_stop_cursor(self, stop_cursor: StopCursor) -> 'PulsarSourceBuilder':
"""
By default the PulsarSource is set to run in Boundedness.CONTINUOUS_UNBOUNDED manner and
thus never stops until the Flink job fails or is canceled. To let the PulsarSource run as a
streaming source but still stops at some point, one can set an StopCursor to specify the
stopping offsets for each partition. When all the partitions have reached their stopping
offsets, the PulsarSource will then exit.
This method is different from set_bounded_stop_cursor(StopCursor) that after setting the
stopping offsets with this method, PulsarSource.getBoundedness() will still return
Boundedness.CONTINUOUS_UNBOUNDED even though it will stop at the stopping offsets specified
by the stopping offsets StopCursor.
"""
self._j_pulsar_source_builder.setUnboundedStopCursor(stop_cursor._j_stop_cursor)
return self
def set_bounded_stop_cursor(self, stop_cursor: StopCursor) -> 'PulsarSourceBuilder':
"""
By default the PulsarSource is set to run in Boundedness.CONTINUOUS_UNBOUNDED manner and
thus never stops until the Flink job fails or is canceled. To let the PulsarSource run in
Boundedness.BOUNDED manner and stops at some point, one can set an StopCursor to specify
the stopping offsets for each partition. When all the partitions have reached their stopping
offsets, the PulsarSource will then exit.
This method is different from set_unbounded_stop_cursor(StopCursor) that after setting the
stopping offsets with this method, PulsarSource.getBoundedness() will return
Boundedness.BOUNDED instead of Boundedness.CONTINUOUS_UNBOUNDED.
"""
self._j_pulsar_source_builder.setBoundedStopCursor(stop_cursor._j_stop_cursor)
return self
def set_deserialization_schema(self, deserialization_schema: DeserializationSchema) \
-> 'PulsarSourceBuilder':
"""
Sets the :class:`~pyflink.common.serialization.DeserializationSchema` for deserializing the
value of Pulsars message.
:param deserialization_schema: the :class:`DeserializationSchema` to use for
deserialization.
:return: this PulsarSourceBuilder.
"""
self._j_pulsar_source_builder.setDeserializationSchema(
deserialization_schema._j_deserialization_schema)
return self
def set_authentication(self,
auth_plugin_class_name: str,
auth_params_string: Union[str, Dict[str, str]]) \
-> 'PulsarSourceBuilder':
"""
Configure the authentication provider to use in the Pulsar client instance.
:param auth_plugin_class_name: Name of the Authentication-Plugin you want to use.
:param auth_params_string: String which represents parameters for the Authentication-Plugin,
e.g., "key1:val1,key2:val2".
.. versionadded:: 1.17.2
"""
if isinstance(auth_params_string, str):
self._j_pulsar_source_builder.setAuthentication(
auth_plugin_class_name, auth_params_string)
else:
j_auth_params_map = get_gateway().jvm.java.util.HashMap()
for k, v in auth_params_string.items():
j_auth_params_map.put(k, v)
self._j_pulsar_source_builder.setAuthentication(
auth_plugin_class_name, j_auth_params_map)
return self
def set_config(self, key: Union[str, ConfigOption], value) -> 'PulsarSourceBuilder':
"""
Set arbitrary properties for the PulsarSource and PulsarConsumer. The valid keys can be
found in PulsarSourceOptions and PulsarOptions.
Make sure the option could be set only once or with same value.
"""
if isinstance(key, ConfigOption):
warnings.warn("set_config(key: ConfigOption, value) is deprecated. "
"Use set_config(key: str, value) instead.",
DeprecationWarning, stacklevel=2)
j_config_option = key._j_config_option
else:
j_config_option = \
ConfigOptions.key(key).string_type().no_default_value()._j_config_option
self._j_pulsar_source_builder.setConfig(j_config_option, value)
return self
def set_config_with_dict(self, config: Dict) -> 'PulsarSourceBuilder':
"""
Set arbitrary properties for the PulsarSource and PulsarConsumer. The valid keys can be
found in PulsarSourceOptions and PulsarOptions.
"""
warnings.warn("set_config_with_dict is deprecated. Use set_properties instead.",
DeprecationWarning, stacklevel=2)
self.set_properties(config)
return self
def set_properties(self, config: Dict) -> 'PulsarSourceBuilder':
"""
Set arbitrary properties for the PulsarSource and PulsarConsumer. The valid keys can be
found in PulsarSourceOptions and PulsarOptions.
"""
JConfiguration = get_gateway().jvm.org.apache.flink.configuration.Configuration
self._j_pulsar_source_builder.setConfig(JConfiguration.fromMap(config))
return self
def build(self) -> 'PulsarSource':
"""
Build the PulsarSource.
"""
return PulsarSource(self._j_pulsar_source_builder.build())
# ---- PulsarSink ----
class TopicRoutingMode(Enum):
"""
The routing policy for choosing the desired topic by the given message.
:data: `ROUND_ROBIN`:
The producer will publish messages across all partitions in a round-robin fashion to achieve
maximum throughput. Please note that round-robin is not done per individual message but
rather it's set to the same boundary of batching delay, to ensure batching is effective.
:data: `MESSAGE_KEY_HASH`:
If no key is provided, The partitioned producer will randomly pick one single topic partition
and publish all the messages into that partition. If a key is provided on the message, the
partitioned producer will hash the key and assign the message to a particular partition.
:data: `CUSTOM`:
Use custom topic router implementation that will be called to determine the partition for a
particular message.
"""
ROUND_ROBIN = 0
MESSAGE_KEY_HASH = 1
CUSTOM = 2
def _to_j_topic_routing_mode(self):
JTopicRoutingMode = get_gateway().jvm \
.org.apache.flink.connector.pulsar.sink.writer.router.TopicRoutingMode
return getattr(JTopicRoutingMode, self.name)
class MessageDelayer(object):
"""
A delayer for Pulsar broker passing the sent message to the downstream consumer. This is only
works in :data:`SubscriptionType.Shared` subscription.
Read delayed message delivery
https://pulsar.apache.org/docs/en/next/concepts-messaging/#delayed-message-delivery for better
understanding this feature.
"""
def __init__(self, _j_message_delayer):
self._j_message_delayer = _j_message_delayer
@staticmethod
def never() -> 'MessageDelayer':
"""
All the messages should be consumed immediately.
"""
JMessageDelayer = get_gateway().jvm \
.org.apache.flink.connector.pulsar.sink.writer.delayer.MessageDelayer
return MessageDelayer(JMessageDelayer.never())
@staticmethod
def fixed(duration: Duration) -> 'MessageDelayer':
"""
All the messages should be consumed in a fixed duration.
"""
JMessageDelayer = get_gateway().jvm \
.org.apache.flink.connector.pulsar.sink.writer.delayer.MessageDelayer
return MessageDelayer(JMessageDelayer.fixed(duration._j_duration))
class PulsarSink(Sink):
"""
The Sink implementation of Pulsar. Please use a PulsarSinkBuilder to construct a
PulsarSink. The following example shows how to create a PulsarSink receiving records of
String type.
Example:
::
>>> sink = PulsarSink.builder() \\
... .set_service_url(PULSAR_BROKER_URL) \\
... .set_admin_url(PULSAR_BROKER_HTTP_URL) \\
... .set_topics(topic) \\
... .set_serialization_schema(SimpleStringSchema()) \\
... .build()
The sink supports all delivery guarantees described by DeliveryGuarantee.
DeliveryGuarantee#NONE does not provide any guarantees: messages may be lost in
case of issues on the Pulsar broker and messages may be duplicated in case of a Flink
failure.
DeliveryGuarantee#AT_LEAST_ONCE the sink will wait for all outstanding records in
the Pulsar buffers to be acknowledged by the Pulsar producer on a checkpoint. No messages
will be lost in case of any issue with the Pulsar brokers but messages may be duplicated
when Flink restarts.
DeliveryGuarantee#EXACTLY_ONCE: In this mode the PulsarSink will write all messages
in a Pulsar transaction that will be committed to Pulsar on a checkpoint. Thus, no
duplicates will be seen in case of a Flink restart. However, this delays record writing
effectively until a checkpoint is written, so adjust the checkpoint duration accordingly.
Additionally, it is highly recommended to tweak Pulsar transaction timeout (link) >>
maximum checkpoint duration + maximum restart duration or data loss may happen when Pulsar
expires an uncommitted transaction.
See PulsarSinkBuilder for more details.
"""
def __init__(self, j_pulsar_sink):
super(PulsarSink, self).__init__(sink=j_pulsar_sink)
@staticmethod
def builder() -> 'PulsarSinkBuilder':
"""
Get a PulsarSinkBuilder to builder a PulsarSink.
"""
return PulsarSinkBuilder()
class PulsarSinkBuilder(object):
"""
The builder class for PulsarSink to make it easier for the users to construct a PulsarSink.
The following example shows the minimum setup to create a PulsarSink that reads the String
values from a Pulsar topic.
Example:
::
>>> sink = PulsarSink.builder() \\
... .set_service_url(PULSAR_BROKER_URL) \\
... .set_admin_url(PULSAR_BROKER_HTTP_URL) \\
... .set_topics([TOPIC1, TOPIC2]) \\
... .set_serialization_schema(SimpleStringSchema()) \\
... .build()
The service url, admin url, and the record serializer are required fields that must be set. If
you don't set the topics, make sure you have provided a custom TopicRouter. Otherwise,
you must provide the topics to produce.
To specify the delivery guarantees of PulsarSink, one can call
#setDeliveryGuarantee(DeliveryGuarantee). The default value of the delivery guarantee is
DeliveryGuarantee#NONE, and it wouldn't promise the consistence when write the message into
Pulsar.
Example:
::
>>> sink = PulsarSink.builder() \\
... .set_service_url(PULSAR_BROKER_URL) \\
... .set_admin_url(PULSAR_BROKER_HTTP_URL) \\
... .set_topics([TOPIC1, TOPIC2]) \\
... .set_serialization_schema(SimpleStringSchema()) \\
... .set_delivery_guarantee(DeliveryGuarantee.EXACTLY_ONCE)
... .build()
"""
def __init__(self):
JPulsarSink = get_gateway().jvm.org.apache.flink.connector.pulsar.sink.PulsarSink
self._j_pulsar_sink_builder = JPulsarSink.builder()
def set_admin_url(self, admin_url: str) -> 'PulsarSinkBuilder':
"""
Sets the admin endpoint for the PulsarAdmin of the PulsarSink.
"""
self._j_pulsar_sink_builder.setAdminUrl(admin_url)
return self
def set_service_url(self, service_url: str) -> 'PulsarSinkBuilder':
"""
Sets the server's link for the PulsarProducer of the PulsarSink.
"""
self._j_pulsar_sink_builder.setServiceUrl(service_url)
return self
def set_producer_name(self, producer_name: str) -> 'PulsarSinkBuilder':
"""
The producer name is informative, and it can be used to identify a particular producer
instance from the topic stats.
"""
self._j_pulsar_sink_builder.setProducerName(producer_name)
return self
def set_topics(self, topics: Union[str, List[str]]) -> 'PulsarSinkBuilder':
"""
Set a pulsar topic list for flink sink. Some topic may not exist currently, write to this
non-existed topic wouldn't throw any exception.
"""
if not isinstance(topics, list):
topics = [topics]
self._j_pulsar_sink_builder.setTopics(topics)
return self
def set_delivery_guarantee(self, delivery_guarantee: DeliveryGuarantee) -> 'PulsarSinkBuilder':
"""
Sets the wanted the DeliveryGuarantee. The default delivery guarantee is
DeliveryGuarantee#NONE.
"""
self._j_pulsar_sink_builder.setDeliveryGuarantee(
delivery_guarantee._to_j_delivery_guarantee())
return self
def set_topic_routing_mode(self, topic_routing_mode: TopicRoutingMode) -> 'PulsarSinkBuilder':
"""
Set a routing mode for choosing right topic partition to send messages.
"""
self._j_pulsar_sink_builder.setTopicRoutingMode(
topic_routing_mode._to_j_topic_routing_mode())
return self
def set_topic_router(self, topic_router_class_name: str) -> 'PulsarSinkBuilder':
"""
Use a custom topic router instead predefine topic routing.
"""
j_topic_router = load_java_class(topic_router_class_name).newInstance()
self._j_pulsar_sink_builder.setTopicRouter(j_topic_router)
return self
def set_serialization_schema(self, serialization_schema: SerializationSchema) \
-> 'PulsarSinkBuilder':
"""
Sets the SerializationSchema of the PulsarSinkBuilder.
"""
self._j_pulsar_sink_builder.setSerializationSchema(
serialization_schema._j_serialization_schema)
return self
def set_authentication(self,
auth_plugin_class_name: str,
auth_params_string: Union[str, Dict[str, str]]) \
-> 'PulsarSinkBuilder':
"""
Configure the authentication provider to use in the Pulsar client instance.
:param auth_plugin_class_name: Name of the Authentication-Plugin you want to use.
:param auth_params_string: String which represents parameters for the Authentication-Plugin,
e.g., "key1:val1,key2:val2".
.. versionadded:: 1.17.2
"""
if isinstance(auth_params_string, str):
self._j_pulsar_sink_builder.setAuthentication(
auth_plugin_class_name, auth_params_string)
else:
j_auth_params_map = get_gateway().jvm.java.util.HashMap()
for k, v in auth_params_string.items():
j_auth_params_map.put(k, v)
self._j_pulsar_sink_builder.setAuthentication(
auth_plugin_class_name, j_auth_params_map)
return self
def delay_sending_message(self, message_delayer: MessageDelayer) -> 'PulsarSinkBuilder':
"""
Set a message delayer for enable Pulsar message delay delivery.
"""
self._j_pulsar_sink_builder.delaySendingMessage(message_delayer._j_message_delayer)
return self
def set_config(self, key: str, value) -> 'PulsarSinkBuilder':
"""
Set an arbitrary property for the PulsarSink and Pulsar Producer. The valid keys can be
found in PulsarSinkOptions and PulsarOptions.
Make sure the option could be set only once or with same value.
"""
j_config_option = ConfigOptions.key(key).string_type().no_default_value()._j_config_option
self._j_pulsar_sink_builder.setConfig(j_config_option, value)
return self
def set_properties(self, config: Dict) -> 'PulsarSinkBuilder':
"""
Set an arbitrary property for the PulsarSink and Pulsar Producer. The valid keys can be
found in PulsarSinkOptions and PulsarOptions.
"""
JConfiguration = get_gateway().jvm.org.apache.flink.configuration.Configuration
self._j_pulsar_sink_builder.setConfig(JConfiguration.fromMap(config))
return self
def build(self) -> 'PulsarSink':
"""
Build the PulsarSink.
"""
return PulsarSink(self._j_pulsar_sink_builder.build())
| 34,324 | 40.80877 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/kinesis.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Dict, Union, List
from pyflink.common import SerializationSchema, DeserializationSchema, \
AssignerWithPeriodicWatermarksWrapper
from pyflink.datastream.functions import SourceFunction
from pyflink.datastream.connectors import Sink
from pyflink.java_gateway import get_gateway
__all__ = [
'KinesisShardAssigner',
'KinesisDeserializationSchema',
'WatermarkTracker',
'PartitionKeyGenerator',
'FlinkKinesisConsumer',
'KinesisStreamsSink',
'KinesisStreamsSinkBuilder',
'KinesisFirehoseSink',
'KinesisFirehoseSinkBuilder'
]
# ---- KinesisSource ----
class KinesisShardAssigner(object):
"""
Utility to map Kinesis shards to Flink subtask indices. Users can provide a Java
KinesisShardAssigner in Python if they want to provide custom shared assigner.
"""
def __init__(self, j_kinesis_shard_assigner):
self._j_kinesis_shard_assigner = j_kinesis_shard_assigner
@staticmethod
def default_shard_assigner() -> 'KinesisShardAssigner':
"""
A Default KinesisShardAssigner that maps Kinesis shard hash-key ranges to Flink subtasks.
"""
return KinesisShardAssigner(get_gateway().jvm.org.apache.flink.streaming.connectors.
kinesis.internals.KinesisDataFetcher.DEFAULT_SHARD_ASSIGNER)
@staticmethod
def uniform_shard_assigner() -> 'KinesisShardAssigner':
"""
A KinesisShardAssigner that maps Kinesis shard hash-key ranges to Flink subtasks.
It creates a more uniform distribution of shards across subtasks than org.apache.flink. \
streaming.connectors.kinesis.internals.KinesisDataFetcher.DEFAULT_SHARD_ASSIGNER when the
Kinesis records in the stream have hash keys that are uniformly distributed over all
possible hash keys, which is the case if records have randomly-generated partition keys.
(This is the same assumption made if you use the Kinesis UpdateShardCount operation with
UNIFORM_SCALING.)
"""
return KinesisShardAssigner(get_gateway().jvm.org.apache.flink.streaming.connectors.
kinesis.util.UniformShardAssigner())
class KinesisDeserializationSchema(object):
"""
This is a deserialization schema specific for the Flink Kinesis Consumer. Different from the
basic DeserializationSchema, this schema offers additional Kinesis-specific information about
the record that may be useful to the user application.
"""
def __init__(self, j_kinesis_deserialization_schema):
self._j_kinesis_deserialization_schema = j_kinesis_deserialization_schema
class WatermarkTracker(object):
"""
The watermark tracker is responsible for aggregating watermarks across distributed operators.
It can be used for sub tasks of a single Flink source as well as multiple heterogeneous sources
or other operators.The class essentially functions like a distributed hash table that enclosing
operators can use to adopt their processing / IO rates
"""
def __init__(self, j_watermark_tracker):
self._j_watermark_tracker = j_watermark_tracker
@staticmethod
def job_manager_watermark_tracker(
aggregate_name: str, log_accumulator_interval_millis: int = -1) -> 'WatermarkTracker':
j_watermark_tracker = get_gateway().jvm.org.apache.flink.streaming.connectors.kinesis.util \
.JobManagerWatermarkTracker(aggregate_name, log_accumulator_interval_millis)
return WatermarkTracker(j_watermark_tracker)
class FlinkKinesisConsumer(SourceFunction):
"""
The Flink Kinesis Consumer is an exactly-once parallel streaming data source that subscribes to
multiple AWS Kinesis streams within the same AWS service region, and can handle resharding of
streams. Each subtask of the consumer is responsible for fetching data records from multiple
Kinesis shards. The number of shards fetched by each subtask will change as shards are closed
and created by Kinesis.
To leverage Flink's checkpointing mechanics for exactly-once streaming processing guarantees,
the Flink Kinesis consumer is implemented with the AWS Java SDK, instead of the officially
recommended AWS Kinesis Client Library, for low-level control on the management of stream state.
The Flink Kinesis Connector also supports setting the initial starting points of Kinesis
streams, namely TRIM_HORIZON and LATEST.
Kinesis and the Flink consumer support dynamic re-sharding and shard IDs, while sequential,
cannot be assumed to be consecutive. There is no perfect generic default assignment function.
Default shard to subtask assignment, which is based on hash code, may result in skew, with some
subtasks having many shards assigned and others none.
It is recommended to monitor the shard distribution and adjust assignment appropriately.
A custom assigner implementation can be set via setShardAssigner(KinesisShardAssigner) to
optimize the hash function or use static overrides to limit skew.
In order for the consumer to emit watermarks, a timestamp assigner needs to be set via
setPeriodicWatermarkAssigner(AssignerWithPeriodicWatermarks) and the auto watermark emit
interval configured via ExecutionConfig.setAutoWatermarkInterval(long).
Watermarks can only advance when all shards of a subtask continuously deliver records.
To avoid an inactive or closed shard to block the watermark progress, the idle timeout should
be configured via configuration property ConsumerConfigConstants.SHARD_IDLE_INTERVAL_MILLIS.
By default, shards won't be considered idle and watermark calculation will wait for newer
records to arrive from all shards.
Note that re-sharding of the Kinesis stream while an application (that relies on the Kinesis
records for watermarking) is running can lead to incorrect late events. This depends on how
shards are assigned to subtasks and applies regardless of whether watermarks are generated in
the source or a downstream operator.
"""
def __init__(self,
streams: Union[str, List[str]],
deserializer: Union[DeserializationSchema, KinesisDeserializationSchema],
config_props: Dict
):
gateway = get_gateway()
j_properties = gateway.jvm.java.util.Properties()
for key, value in config_props.items():
j_properties.setProperty(key, value)
JFlinkKinesisConsumer = gateway.jvm.org.apache.flink.streaming.connectors.kinesis. \
FlinkKinesisConsumer
JKinesisDeserializationSchemaWrapper = get_gateway().jvm.org.apache.flink.streaming. \
connectors.kinesis.serialization.KinesisDeserializationSchemaWrapper
if isinstance(streams, str):
streams = [streams]
if isinstance(deserializer, DeserializationSchema):
deserializer = JKinesisDeserializationSchemaWrapper(
deserializer._j_deserialization_schema)
self._j_kinesis_consumer = JFlinkKinesisConsumer(streams, deserializer, j_properties)
super(FlinkKinesisConsumer, self).__init__(self._j_kinesis_consumer)
def set_shard_assigner(self, shard_assigner: KinesisShardAssigner) -> 'FlinkKinesisConsumer':
"""
Provide a custom assigner to influence how shards are distributed over subtasks.
"""
self._j_kinesis_consumer.setShardAssigner(shard_assigner._j_kinesis_shard_assigner)
return self
def set_periodic_watermark_assigner(
self,
periodic_watermark_assigner: AssignerWithPeriodicWatermarksWrapper) \
-> 'FlinkKinesisConsumer':
"""
Set the assigner that will extract the timestamp from T and calculate the watermark.
"""
self._j_kinesis_consumer.setPeriodicWatermarkAssigner(
periodic_watermark_assigner._j_assigner_with_periodic_watermarks)
return self
def set_watermark_tracker(self, watermark_tracker: WatermarkTracker) -> 'FlinkKinesisConsumer':
"""
Set the global watermark tracker. When set, it will be used by the fetcher to align the
shard consumers by event time.
"""
self._j_kinesis_consumer.setWatermarkTracker(watermark_tracker._j_watermark_tracker)
return self
# ---- KinesisSink ----
class PartitionKeyGenerator(object):
"""
This is a generator convert from an input element to the partition key, a string.
"""
def __init__(self, j_partition_key_generator):
self._j_partition_key_generator = j_partition_key_generator
@staticmethod
def fixed() -> 'PartitionKeyGenerator':
"""
A partitioner ensuring that each internal Flink partition ends up in the same Kinesis
partition. This is achieved by using the index of the producer task as a PartitionKey.
"""
return PartitionKeyGenerator(get_gateway().jvm.org.apache.flink.connector.kinesis.table.
FixedKinesisPartitionKeyGenerator())
@staticmethod
def random() -> 'PartitionKeyGenerator':
"""
A PartitionKeyGenerator that maps an arbitrary input element to a random partition ID.
"""
return PartitionKeyGenerator(get_gateway().jvm.org.apache.flink.connector.kinesis.table.
RandomKinesisPartitionKeyGenerator())
class KinesisStreamsSink(Sink):
"""
A Kinesis Data Streams (KDS) Sink that performs async requests against a destination stream
using the buffering protocol.
The sink internally uses a software.amazon.awssdk.services.kinesis.KinesisAsyncClient to
communicate with the AWS endpoint.
The behaviour of the buffering may be specified by providing configuration during the sink
build time.
- maxBatchSize: the maximum size of a batch of entries that may be sent to KDS
- maxInFlightRequests: the maximum number of in flight requests that may exist, if any more in
flight requests need to be initiated once the maximum has been reached, then it will be
blocked until some have completed
- maxBufferedRequests: the maximum number of elements held in the buffer, requests to add
elements will be blocked while the number of elements in the buffer is at the maximum
- maxBatchSizeInBytes: the maximum size of a batch of entries that may be sent to KDS
measured in bytes
- maxTimeInBufferMS: the maximum amount of time an entry is allowed to live in the buffer,
if any element reaches this age, the entire buffer will be flushed immediately
- maxRecordSizeInBytes: the maximum size of a record the sink will accept into the buffer,
a record of size larger than this will be rejected when passed to the sink
- failOnError: when an exception is encountered while persisting to Kinesis Data Streams,
the job will fail immediately if failOnError is set
"""
def __init__(self, j_kinesis_streams_sink):
super(KinesisStreamsSink, self).__init__(sink=j_kinesis_streams_sink)
@staticmethod
def builder() -> 'KinesisStreamsSinkBuilder':
return KinesisStreamsSinkBuilder()
class KinesisStreamsSinkBuilder(object):
"""
Builder to construct KinesisStreamsSink.
The following example shows the minimum setup to create a KinesisStreamsSink that writes String
values to a Kinesis Data Streams stream named your_stream_here.
Example:
::
>>> from pyflink.common.serialization import SimpleStringSchema
>>> sink_properties = {"aws.region": "eu-west-1"}
>>> sink = KinesisStreamsSink.builder() \\
... .set_kinesis_client_properties(sink_properties) \\
... .set_stream_name("your_stream_name") \\
... .set_serialization_schema(SimpleStringSchema()) \\
... .set_partition_key_generator(PartitionKeyGenerator.random()) \\
... .build()
If the following parameters are not set in this builder, the following defaults will be used:
- maxBatchSize will be 500
- maxInFlightRequests will be 50
- maxBufferedRequests will be 10000
- maxBatchSizeInBytes will be 5 MB i.e. 5 * 1024 * 1024
- maxTimeInBufferMS will be 5000ms
- maxRecordSizeInBytes will be 1 MB i.e. 1 * 1024 * 1024
- failOnError will be false
"""
def __init__(self):
JKinesisStreamsSink = get_gateway().jvm.org.apache.flink.connector.kinesis.sink.\
KinesisStreamsSink
self._j_kinesis_sink_builder = JKinesisStreamsSink.builder()
def set_stream_name(self, stream_name: Union[str, List[str]]) -> 'KinesisStreamsSinkBuilder':
"""
Sets the name of the KDS stream that the sink will connect to. There is no default for this
parameter, therefore, this must be provided at sink creation time otherwise the build will
fail.
"""
self._j_kinesis_sink_builder.setStreamName(stream_name)
return self
def set_serialization_schema(self, serialization_schema: SerializationSchema) \
-> 'KinesisStreamsSinkBuilder':
"""
Sets the SerializationSchema of the KinesisSinkBuilder.
"""
self._j_kinesis_sink_builder.setSerializationSchema(
serialization_schema._j_serialization_schema)
return self
def set_partition_key_generator(self, partition_key_generator: PartitionKeyGenerator) \
-> 'KinesisStreamsSinkBuilder':
"""
Sets the PartitionKeyGenerator of the KinesisSinkBuilder.
"""
self._j_kinesis_sink_builder.setPartitionKeyGenerator(
partition_key_generator._j_partition_key_generator)
return self
def set_fail_on_error(self, fail_on_error: bool) -> 'KinesisStreamsSinkBuilder':
"""
Sets the failOnError of the KinesisSinkBuilder. If failOnError is on, then a runtime
exception will be raised. Otherwise, those records will be requested in the buffer for
retry.
"""
self._j_kinesis_sink_builder.setFailOnError(fail_on_error)
return self
def set_kinesis_client_properties(self, kinesis_client_properties: Dict) \
-> 'KinesisStreamsSinkBuilder':
"""
Sets the kinesisClientProperties of the KinesisSinkBuilder.
"""
j_properties = get_gateway().jvm.java.util.Properties()
for key, value in kinesis_client_properties.items():
j_properties.setProperty(key, value)
self._j_kinesis_sink_builder.setKinesisClientProperties(j_properties)
return self
def set_max_batch_size(self, max_batch_size: int) -> 'KinesisStreamsSinkBuilder':
"""
Maximum number of elements that may be passed in a list to be written downstream.
"""
self._j_kinesis_sink_builder.setMaxBatchSize(max_batch_size)
return self
def set_max_in_flight_requests(self, max_in_flight_requests: int) \
-> 'KinesisStreamsSinkBuilder':
"""
Maximum number of uncompleted calls to submitRequestEntries that the SinkWriter will allow
at any given point. Once this point has reached, writes and callbacks to add elements to
the buffer may block until one or more requests to submitRequestEntries completes.
"""
self._j_kinesis_sink_builder.setMaxInFlightRequests(max_in_flight_requests)
return self
def set_max_buffered_requests(self, max_buffered_requests: int) -> 'KinesisStreamsSinkBuilder':
"""
The maximum buffer length. Callbacks to add elements to the buffer and calls to write will
block if this length has been reached and will only unblock if elements from the buffer have
been removed for flushing.
"""
self._j_kinesis_sink_builder.setMaxBufferedRequests(max_buffered_requests)
return self
def set_max_batch_size_in_bytes(self, max_batch_size_in_bytes: int) \
-> 'KinesisStreamsSinkBuilder':
"""
The flush will be attempted if the most recent call to write introduces an element to the
buffer such that the total size of the buffer is greater than or equal to this threshold
value. If this happens, the maximum number of elements from the head of the buffer will be
selected, that is smaller than maxBatchSizeInBytes in size will be flushed.
"""
self._j_kinesis_sink_builder.setMaxBatchSizeInBytes(max_batch_size_in_bytes)
return self
def set_max_time_in_buffer_ms(self, max_time_in_buffer_ms: int) -> 'KinesisStreamsSinkBuilder':
"""
The maximum amount of time an element may remain in the buffer. In most cases elements are
flushed as a result of the batch size (in bytes or number) being reached or during a
snapshot. However, there are scenarios where an element may remain in the buffer forever or
a long period of time. To mitigate this, a timer is constantly active in the buffer such
that: while the buffer is not empty, it will flush every maxTimeInBufferMS milliseconds.
"""
self._j_kinesis_sink_builder.setMaxTimeInBufferMS(max_time_in_buffer_ms)
return self
def set_max_record_size_in_bytes(self, max_record_size_in_bytes: int) \
-> 'KinesisStreamsSinkBuilder':
"""
The maximum size of each records in bytes. If a record larger than this is passed to the
sink, it will throw an IllegalArgumentException.
"""
self._j_kinesis_sink_builder.setMaxRecordSizeInBytes(max_record_size_in_bytes)
return self
def build(self) -> 'KinesisStreamsSink':
"""
Build thd KinesisStreamsSink.
"""
return KinesisStreamsSink(self._j_kinesis_sink_builder.build())
class KinesisFirehoseSink(Sink):
"""
A Kinesis Data Firehose (KDF) Sink that performs async requests against a destination delivery
stream using the buffering protocol.
"""
def __init__(self, j_kinesis_firehose_sink):
super(KinesisFirehoseSink, self).__init__(sink=j_kinesis_firehose_sink)
@staticmethod
def builder() -> 'KinesisFirehoseSinkBuilder':
return KinesisFirehoseSinkBuilder()
class KinesisFirehoseSinkBuilder(object):
"""
Builder to construct KinesisFirehoseSink.
The following example shows the minimum setup to create a KinesisFirehoseSink that writes
String values to a Kinesis Data Firehose delivery stream named delivery-stream-name.
Example:
::
>>> from pyflink.common.serialization import SimpleStringSchema
>>> sink_properties = {"aws.region": "eu-west-1"}
>>> sink = KinesisFirehoseSink.builder() \\
... .set_firehose_client_properties(sink_properties) \\
... .set_delivery_stream_name("delivery-stream-name") \\
... .set_serialization_schema(SimpleStringSchema()) \\
... .set_max_batch_size(20) \\
... .build()
If the following parameters are not set in this builder, the following defaults will be used:
- maxBatchSize will be 500
- maxInFlightRequests will be 50
- maxBufferedRequests will be 10000
- maxBatchSizeInBytes will be 4 MB i.e. 4 * 1024 * 1024
- maxTimeInBufferMS will be 5000ms
- maxRecordSizeInBytes will be 1000 KB i.e. 1000 * 1024
- failOnError will be false
"""
def __init__(self):
JKinesisFirehoseSink = get_gateway().jvm.org.apache.flink.connector.firehose.sink. \
KinesisFirehoseSink
self._j_kinesis_sink_builder = JKinesisFirehoseSink.builder()
def set_delivery_stream_name(self, delivery_stream_name: str) -> 'KinesisFirehoseSinkBuilder':
"""
Sets the name of the KDF delivery stream that the sink will connect to. There is no default
for this parameter, therefore, this must be provided at sink creation time otherwise the
build will fail.
"""
self._j_kinesis_sink_builder.setDeliveryStreamName(delivery_stream_name)
return self
def set_serialization_schema(self, serialization_schema: SerializationSchema) \
-> 'KinesisFirehoseSinkBuilder':
"""
Allows the user to specify a serialization schema to serialize each record to persist to
Firehose.
"""
self._j_kinesis_sink_builder.setSerializationSchema(
serialization_schema._j_serialization_schema)
return self
def set_fail_on_error(self, fail_on_error: bool) -> 'KinesisFirehoseSinkBuilder':
"""
If writing to Kinesis Data Firehose results in a partial or full failure being returned,
the job will fail
"""
self._j_kinesis_sink_builder.setFailOnError(fail_on_error)
return self
def set_firehose_client_properties(self, firehose_client_properties: Dict) \
-> 'KinesisFirehoseSinkBuilder':
"""
A set of properties used by the sink to create the firehose client. This may be used to set
the aws region, credentials etc. See the docs for usage and syntax.
"""
j_properties = get_gateway().jvm.java.util.Properties()
for key, value in firehose_client_properties.items():
j_properties.setProperty(key, value)
self._j_kinesis_sink_builder.setFirehoseClientProperties(j_properties)
return self
def set_max_batch_size(self, max_batch_size: int) -> 'KinesisFirehoseSinkBuilder':
"""
Maximum number of elements that may be passed in a list to be written downstream.
"""
self._j_kinesis_sink_builder.setMaxBatchSize(max_batch_size)
return self
def set_max_in_flight_requests(self, max_in_flight_requests: int) \
-> 'KinesisFirehoseSinkBuilder':
"""
Maximum number of uncompleted calls to submitRequestEntries that the SinkWriter will allow
at any given point. Once this point has reached, writes and callbacks to add elements to
the buffer may block until one or more requests to submitRequestEntries completes.
"""
self._j_kinesis_sink_builder.setMaxInFlightRequests(max_in_flight_requests)
return self
def set_max_buffered_requests(self, max_buffered_requests: int) -> 'KinesisFirehoseSinkBuilder':
"""
The maximum buffer length. Callbacks to add elements to the buffer and calls to write will
block if this length has been reached and will only unblock if elements from the buffer have
been removed for flushing.
"""
self._j_kinesis_sink_builder.setMaxBufferedRequests(max_buffered_requests)
return self
def set_max_batch_size_in_bytes(self, max_batch_size_in_bytes: int) \
-> 'KinesisFirehoseSinkBuilder':
"""
The flush will be attempted if the most recent call to write introduces an element to the
buffer such that the total size of the buffer is greater than or equal to this threshold
value. If this happens, the maximum number of elements from the head of the buffer will be
selected, that is smaller than maxBatchSizeInBytes in size will be flushed.
"""
self._j_kinesis_sink_builder.setMaxBatchSizeInBytes(max_batch_size_in_bytes)
return self
def set_max_time_in_buffer_ms(self, max_time_in_buffer_ms: int) -> 'KinesisFirehoseSinkBuilder':
"""
The maximum amount of time an element may remain in the buffer. In most cases elements are
flushed as a result of the batch size (in bytes or number) being reached or during a
snapshot. However, there are scenarios where an element may remain in the buffer forever or
a long period of time. To mitigate this, a timer is constantly active in the buffer such
that: while the buffer is not empty, it will flush every maxTimeInBufferMS milliseconds.
"""
self._j_kinesis_sink_builder.setMaxTimeInBufferMS(max_time_in_buffer_ms)
return self
def set_max_record_size_in_bytes(self, max_record_size_in_bytes: int) \
-> 'KinesisFirehoseSinkBuilder':
"""
The maximum size of each records in bytes. If a record larger than this is passed to the
sink, it will throw an IllegalArgumentException.
"""
self._j_kinesis_sink_builder.setMaxRecordSizeInBytes(max_record_size_in_bytes)
return self
def build(self) -> 'KinesisFirehoseSink':
"""
Build thd KinesisFirehoseSink.
"""
return KinesisFirehoseSink(self._j_kinesis_sink_builder.build())
| 25,715 | 45.84153 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/elasticsearch.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
from enum import Enum
from typing import List, Union
from pyflink.datastream.connectors import Sink, DeliveryGuarantee
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import to_jarray
__all__ = ['FlushBackoffType',
'ElasticsearchEmitter',
'Elasticsearch6SinkBuilder',
'Elasticsearch7SinkBuilder',
'ElasticsearchSink']
class FlushBackoffType(Enum):
"""
Used to control whether the sink should retry failed requests at all or with which kind back off
strategy.
:data: `CONSTANT`:
After every failure, it waits a configured time until the retries are exhausted.
:data: `EXPONENTIAL`:
After every failure, it waits initially the configured time and increases the waiting time
exponentially until the retries are exhausted.
:data: `NONE`:
The failure is not retried.
"""
CONSTANT = 0,
EXPONENTIAL = 1,
NONE = 2,
def _to_j_flush_backoff_type(self):
JFlushBackoffType = get_gateway().jvm \
.org.apache.flink.connector.elasticsearch.sink.FlushBackoffType
return getattr(JFlushBackoffType, self.name)
class ElasticsearchEmitter(object):
"""
Emitter which is used by sinks to prepare elements for sending them to Elasticsearch.
"""
def __init__(self, j_emitter):
self._j_emitter = j_emitter
@staticmethod
def static_index(index: str, key_field: str = None, doc_type: str = None) \
-> 'ElasticsearchEmitter':
"""
Creates an emitter with static index which is invoked on every record to convert it to
Elasticsearch actions.
"""
JMapElasticsearchEmitter = get_gateway().jvm \
.org.apache.flink.connector.elasticsearch.sink.MapElasticsearchEmitter
j_emitter = JMapElasticsearchEmitter(index, doc_type, key_field, False)
return ElasticsearchEmitter(j_emitter)
@staticmethod
def dynamic_index(index_field: str, key_field: str = None, doc_type: str = None) \
-> 'ElasticsearchEmitter':
"""
Creates an emitter with dynamic index which is invoked on every record to convert it to
Elasticsearch actions.
"""
JMapElasticsearchEmitter = get_gateway().jvm \
.org.apache.flink.connector.elasticsearch.sink.MapElasticsearchEmitter
j_emitter = JMapElasticsearchEmitter(index_field, doc_type, key_field, True)
return ElasticsearchEmitter(j_emitter)
class ElasticsearchSinkBuilderBase(abc.ABC):
"""
Base builder to construct a ElasticsearchSink.
"""
@abc.abstractmethod
def __init__(self):
self._j_elasticsearch_sink_builder = None
@abc.abstractmethod
def get_http_host_class(self):
"""
Gets the org.apache.http.HttpHost class which path is different in different Elasticsearch
version.
"""
pass
def set_emitter(self, emitter: ElasticsearchEmitter) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the emitter which is invoked on every record to convert it to Elasticsearch actions.
:param emitter: The emitter to process records into Elasticsearch actions.
"""
self._j_elasticsearch_sink_builder.setEmitter(emitter._j_emitter)
return self
def set_hosts(self, hosts: Union[str, List[str]]) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the hosts where the Elasticsearch cluster nodes are reachable.
"""
if not isinstance(hosts, list):
hosts = [hosts]
JHttpHost = self.get_http_host_class()
j_http_hosts_list = [JHttpHost.create(x) for x in hosts]
j_http_hosts_array = to_jarray(JHttpHost, j_http_hosts_list)
self._j_elasticsearch_sink_builder.setHosts(j_http_hosts_array)
return self
def set_delivery_guarantee(self, delivery_guarantee: DeliveryGuarantee) \
-> 'ElasticsearchSinkBuilderBase':
"""
Sets the wanted DeliveryGuarantee. The default delivery guarantee is DeliveryGuarantee#NONE
"""
j_delivery_guarantee = delivery_guarantee._to_j_delivery_guarantee()
self._j_elasticsearch_sink_builder.setDeliveryGuarantee(j_delivery_guarantee)
return self
def set_bulk_flush_max_actions(self, num_max_actions: int) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the maximum number of actions to buffer for each bulk request. You can pass -1 to
disable it. The default flush size 1000.
"""
self._j_elasticsearch_sink_builder.setBulkFlushMaxActions(num_max_actions)
return self
def set_bulk_flush_max_size_mb(self, max_size_mb: int) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the maximum size of buffered actions, in mb, per bulk request. You can pass -1 to
disable it.
"""
self._j_elasticsearch_sink_builder.setBulkFlushMaxSizeMb(max_size_mb)
return self
def set_bulk_flush_interval(self, interval_millis: int) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the bulk flush interval, in milliseconds. You can pass -1 to disable it.
"""
self._j_elasticsearch_sink_builder.setBulkFlushInterval(interval_millis)
return self
def set_bulk_flush_backoff_strategy(self,
flush_backoff_type: FlushBackoffType,
max_retries: int,
delay_millis: int) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the type of back off to use when flushing bulk requests. The default bulk flush back
off type is FlushBackoffType#NONE.
Sets the amount of delay between each backoff attempt when flushing bulk requests, in
milliseconds.
Sets the maximum number of retries for a backoff attempt when flushing bulk requests.
"""
self._j_elasticsearch_sink_builder.setBulkFlushBackoffStrategy(
flush_backoff_type._to_j_flush_backoff_type(), max_retries, delay_millis)
return self
def set_connection_username(self, username: str) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the username used to authenticate the connection with the Elasticsearch cluster.
"""
self._j_elasticsearch_sink_builder.setConnectionUsername(username)
return self
def set_connection_password(self, password: str) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the password used to authenticate the connection with the Elasticsearch cluster.
"""
self._j_elasticsearch_sink_builder.setConnectionPassword(password)
return self
def set_connection_path_prefix(self, prefix: str) -> 'ElasticsearchSinkBuilderBase':
"""
Sets a prefix which used for every REST communication to the Elasticsearch cluster.
"""
self._j_elasticsearch_sink_builder.setConnectionPathPrefix(prefix)
return self
def set_connection_request_timeout(self, timeout: int) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the timeout for requesting the connection of the Elasticsearch cluster from the
connection manager.
"""
self._j_elasticsearch_sink_builder.setConnectionRequestTimeout(timeout)
return self
def set_connection_timeout(self, timeout: int) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the timeout for establishing a connection of the Elasticsearch cluster.
"""
self._j_elasticsearch_sink_builder.setConnectionTimeout(timeout)
return self
def set_socket_timeout(self, timeout: int) -> 'ElasticsearchSinkBuilderBase':
"""
Sets the timeout for waiting for data or, put differently, a maximum period inactivity
between two consecutive data packets.
"""
self._j_elasticsearch_sink_builder.setSocketTimeout(timeout)
return self
def build(self) -> 'ElasticsearchSink':
"""
Constructs the ElasticsearchSink with the properties configured this builder.
"""
return ElasticsearchSink(self._j_elasticsearch_sink_builder.build())
class Elasticsearch6SinkBuilder(ElasticsearchSinkBuilderBase):
"""
Builder to construct an Elasticsearch 6 compatible ElasticsearchSink.
The following example shows the minimal setup to create a ElasticsearchSink that submits
actions on checkpoint or the default number of actions was buffered (1000).
Example:
::
>>> sink = Elasticsearch6SinkBuilder() \\
... .set_hosts('localhost:9200') \\
... .set_emitter(ElasticsearchEmitter.static_index("user", "key_col")) \\
... .build()
"""
def __init__(self):
self._j_elasticsearch_sink_builder = get_gateway().jvm \
.org.apache.flink.connector.elasticsearch.sink.Elasticsearch6SinkBuilder()
def get_http_host_class(self):
return get_gateway().jvm.org.apache.flink.elasticsearch6.shaded.org.apache.http.HttpHost
class Elasticsearch7SinkBuilder(ElasticsearchSinkBuilderBase):
"""
Builder to construct an Elasticsearch 7 compatible ElasticsearchSink.
The following example shows the minimal setup to create a ElasticsearchSink that submits
actions on checkpoint or the default number of actions was buffered (1000).
Example:
::
>>> sink = Elasticsearch7SinkBuilder() \\
... .set_hosts('localhost:9200') \\
... .set_emitter(ElasticsearchEmitter.dynamic_index("index_col", "key_col")) \\
... .build()
"""
def __init__(self):
self._j_elasticsearch_sink_builder = get_gateway().jvm \
.org.apache.flink.connector.elasticsearch.sink.Elasticsearch7SinkBuilder()
def get_http_host_class(self):
return get_gateway().jvm.org.apache.flink.elasticsearch7.shaded.org.apache.http.HttpHost
class ElasticsearchSink(Sink):
"""
Flink Sink to insert or update data in an Elasticsearch index. The sink supports the following
delivery guarantees.
DeliveryGuarantee.NONE does not provide any guarantees: actions are flushed to Elasticsearch
only depending on the configurations of the bulk processor. In case of a failure, it might
happen that actions are lost if the bulk processor still has buffered actions.
DeliveryGuarantee.AT_LEAST_ONCE on a checkpoint the sink will wait until all buffered actions
are flushed to and acknowledged by Elasticsearch. No actions will be lost but actions might be
sent to Elasticsearch multiple times when Flink restarts. These additional requests may cause
inconsistent data in ElasticSearch right after the restart, but eventually everything will be
consistent again.
"""
def __init__(self, j_elasticsearch_sink):
super(ElasticsearchSink, self).__init__(sink=j_elasticsearch_sink)
| 11,926 | 38.889632 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/number_seq.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.connectors import Source
from pyflink.java_gateway import get_gateway
__all__ = [
'NumberSequenceSource'
]
class NumberSequenceSource(Source):
"""
A data source that produces a sequence of numbers (longs). This source is useful for testing and
for cases that just need a stream of N events of any kind.
The source splits the sequence into as many parallel sub-sequences as there are parallel
source readers. Each sub-sequence will be produced in order. Consequently, if the parallelism is
limited to one, this will produce one sequence in order.
This source is always bounded. For very long sequences (for example over the entire domain of
long integer values), user may want to consider executing the application in a streaming manner,
because, despite the fact that the produced stream is bounded, the end bound is pretty far away.
"""
def __init__(self, start: int, end: int):
"""
Creates a new NumberSequenceSource that produces parallel sequences covering the
range start to end (both boundaries are inclusive).
"""
JNumberSequenceSource = get_gateway().jvm.org.apache.flink.api.connector.source.lib.\
NumberSequenceSource
j_seq_source = JNumberSequenceSource(start, end)
super(NumberSequenceSource, self).__init__(source=j_seq_source)
| 2,345 | 46.877551 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/jdbc.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.typeinfo import RowTypeInfo
from pyflink.datastream.functions import SinkFunction
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import to_jarray
__all__ = [
'JdbcSink',
'JdbcConnectionOptions',
'JdbcExecutionOptions'
]
class JdbcSink(SinkFunction):
def __init__(self, j_jdbc_sink):
super(JdbcSink, self).__init__(sink_func=j_jdbc_sink)
@staticmethod
def sink(sql: str, type_info: RowTypeInfo, jdbc_connection_options: 'JdbcConnectionOptions',
jdbc_execution_options: 'JdbcExecutionOptions' = None):
"""
Create a JDBC sink.
:param sql: arbitrary DML query (e.g. insert, update, upsert)
:param type_info: A RowTypeInfo for query field types.
:param jdbc_execution_options: parameters of execution, such as batch size and maximum
retries.
:param jdbc_connection_options: parameters of connection, such as JDBC URL.
:return: A JdbcSink.
"""
sql_types = []
gateway = get_gateway()
JJdbcTypeUtil = gateway.jvm.org.apache.flink.connector.jdbc.utils.JdbcTypeUtil
for field_type in type_info.get_field_types():
sql_types.append(JJdbcTypeUtil
.typeInformationToSqlType(field_type.get_java_type_info()))
j_sql_type = to_jarray(gateway.jvm.int, sql_types)
output_format_clz = gateway.jvm.Class\
.forName('org.apache.flink.connector.jdbc.internal.JdbcOutputFormat', False,
get_gateway().jvm.Thread.currentThread().getContextClassLoader())
j_int_array_type = to_jarray(gateway.jvm.int, []).getClass()
j_builder_method = output_format_clz.getDeclaredMethod('createRowJdbcStatementBuilder',
to_jarray(gateway.jvm.Class,
[j_int_array_type]))
j_builder_method.setAccessible(True)
j_statement_builder = j_builder_method.invoke(None, to_jarray(gateway.jvm.Object,
[j_sql_type]))
jdbc_execution_options = jdbc_execution_options if jdbc_execution_options is not None \
else JdbcExecutionOptions.defaults()
j_jdbc_sink = gateway.jvm.org.apache.flink.connector.jdbc.JdbcSink\
.sink(sql, j_statement_builder, jdbc_execution_options._j_jdbc_execution_options,
jdbc_connection_options._j_jdbc_connection_options)
return JdbcSink(j_jdbc_sink=j_jdbc_sink)
class JdbcConnectionOptions(object):
"""
JDBC connection options.
"""
def __init__(self, j_jdbc_connection_options):
self._j_jdbc_connection_options = j_jdbc_connection_options
def get_db_url(self) -> str:
return self._j_jdbc_connection_options.getDbURL()
def get_driver_name(self) -> str:
return self._j_jdbc_connection_options.getDriverName()
def get_user_name(self) -> str:
return self._j_jdbc_connection_options.getUsername()
def get_password(self) -> str:
return self._j_jdbc_connection_options.getPassword()
class JdbcConnectionOptionsBuilder(object):
"""
Builder for JdbcConnectionOptions.
"""
def __init__(self):
self._j_options_builder = get_gateway().jvm.org.apache.flink.connector\
.jdbc.JdbcConnectionOptions.JdbcConnectionOptionsBuilder()
def with_url(self, url: str) -> 'JdbcConnectionOptions.JdbcConnectionOptionsBuilder':
self._j_options_builder.withUrl(url)
return self
def with_driver_name(self, driver_name: str) \
-> 'JdbcConnectionOptions.JdbcConnectionOptionsBuilder':
self._j_options_builder.withDriverName(driver_name)
return self
def with_user_name(self, user_name: str) \
-> 'JdbcConnectionOptions.JdbcConnectionOptionsBuilder':
self._j_options_builder.withUsername(user_name)
return self
def with_password(self, password: str) \
-> 'JdbcConnectionOptions.JdbcConnectionOptionsBuilder':
self._j_options_builder.withPassword(password)
return self
def build(self) -> 'JdbcConnectionOptions':
return JdbcConnectionOptions(j_jdbc_connection_options=self._j_options_builder.build())
class JdbcExecutionOptions(object):
"""
JDBC sink batch options.
"""
def __init__(self, j_jdbc_execution_options):
self._j_jdbc_execution_options = j_jdbc_execution_options
def get_batch_interval_ms(self) -> int:
return self._j_jdbc_execution_options.getBatchIntervalMs()
def get_batch_size(self) -> int:
return self._j_jdbc_execution_options.getBatchSize()
def get_max_retries(self) -> int:
return self._j_jdbc_execution_options.getMaxRetries()
@staticmethod
def defaults() -> 'JdbcExecutionOptions':
return JdbcExecutionOptions(
j_jdbc_execution_options=get_gateway().jvm
.org.apache.flink.connector.jdbc.JdbcExecutionOptions.defaults())
@staticmethod
def builder() -> 'Builder':
return JdbcExecutionOptions.Builder()
class Builder(object):
"""
Builder for JdbcExecutionOptions.
"""
def __init__(self):
self._j_builder = get_gateway().jvm\
.org.apache.flink.connector.jdbc.JdbcExecutionOptions.builder()
def with_batch_size(self, size: int) -> 'JdbcExecutionOptions.Builder':
self._j_builder.withBatchSize(size)
return self
def with_batch_interval_ms(self, interval_ms: int) -> 'JdbcExecutionOptions.Builder':
self._j_builder.withBatchIntervalMs(interval_ms)
return self
def with_max_retries(self, max_retries: int) -> 'JdbcExecutionOptions.Builder':
self._j_builder.withMaxRetries(max_retries)
return self
def build(self) -> 'JdbcExecutionOptions':
return JdbcExecutionOptions(j_jdbc_execution_options=self._j_builder.build())
| 7,203 | 40.641618 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/rabbitmq.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import SerializationSchema, DeserializationSchema
from pyflink.datastream.functions import SinkFunction, SourceFunction
from pyflink.java_gateway import get_gateway
__all__ = [
'RMQConnectionConfig',
'RMQSource',
'RMQSink'
]
class RMQConnectionConfig(object):
"""
Connection Configuration for RMQ.
"""
def __init__(self, j_rmq_connection_config):
self._j_rmq_connection_config = j_rmq_connection_config
def get_host(self) -> str:
return self._j_rmq_connection_config.getHost()
def get_port(self) -> int:
return self._j_rmq_connection_config.getPort()
def get_virtual_host(self) -> str:
return self._j_rmq_connection_config.getVirtualHost()
def get_user_name(self) -> str:
return self._j_rmq_connection_config.getUsername()
def get_password(self) -> str:
return self._j_rmq_connection_config.getPassword()
def get_uri(self) -> str:
return self._j_rmq_connection_config.getUri()
def get_network_recovery_interval(self) -> int:
return self._j_rmq_connection_config.getNetworkRecoveryInterval()
def is_automatic_recovery(self) -> bool:
return self._j_rmq_connection_config.isAutomaticRecovery()
def is_topology_recovery(self) -> bool:
return self._j_rmq_connection_config.isTopologyRecovery()
def get_connection_timeout(self) -> int:
return self._j_rmq_connection_config.getConnectionTimeout()
def get_requested_channel_max(self) -> int:
return self._j_rmq_connection_config.getRequestedChannelMax()
def get_requested_frame_max(self) -> int:
return self._j_rmq_connection_config.getRequestedFrameMax()
def get_requested_heartbeat(self) -> int:
return self._j_rmq_connection_config.getRequestedHeartbeat()
class Builder(object):
"""
Builder for RMQConnectionConfig.
"""
def __init__(self):
self._j_options_builder = get_gateway().jvm.org.apache.flink.streaming.connectors\
.rabbitmq.common.RMQConnectionConfig.Builder()
def set_port(self, port: int) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setPort(port)
return self
def set_host(self, host: str) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setHost(host)
return self
def set_virtual_host(self, vhost: str) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setVirtualHost(vhost)
return self
def set_user_name(self, user_name: str) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setUserName(user_name)
return self
def set_password(self, password: str) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setPassword(password)
return self
def set_uri(self, uri: str) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setUri(uri)
return self
def set_topology_recovery_enabled(
self, topology_recovery_enabled: bool) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setTopologyRecoveryEnabled(topology_recovery_enabled)
return self
def set_requested_heartbeat(
self, requested_heartbeat: int) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setRequestedHeartbeat(requested_heartbeat)
return self
def set_requested_frame_max(
self, requested_frame_max: int) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setRequestedFrameMax(requested_frame_max)
return self
def set_requested_channel_max(
self, requested_channel_max: int) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setRequestedChannelMax(requested_channel_max)
return self
def set_network_recovery_interval(
self, network_recovery_interval: int) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setNetworkRecoveryInterval(network_recovery_interval)
return self
def set_connection_timeout(self, connection_timeout: int) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setConnectionTimeout(connection_timeout)
return self
def set_automatic_recovery(self, automatic_recovery: bool) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setAutomaticRecovery(automatic_recovery)
return self
def set_prefetch_count(self, prefetch_count: int) -> 'RMQConnectionConfig.Builder':
self._j_options_builder.setPrefetchCount(prefetch_count)
return self
def build(self) -> 'RMQConnectionConfig':
return RMQConnectionConfig(self._j_options_builder.build())
class RMQSource(SourceFunction):
def __init__(self,
connection_config: 'RMQConnectionConfig',
queue_name: str,
use_correlation_id: bool,
deserialization_schema: DeserializationSchema
):
"""
Creates a new RabbitMQ source.
For exactly-once, you must set the correlation ids of messages at the producer.
The correlation id must be unique. Otherwise the behavior of the source is undefined.
If in doubt, set use_correlation_id to False.
When correlation ids are not used, this source has at-least-once processing semantics
when checkpointing is enabled.
:param connection_config: The RabbiMQ connection configuration.
:param queue_name: The queue to receive messages from.
:param use_correlation_id: Whether the messages received are supplied with a unique id
to deduplicate messages (in case of failed acknowledgments).
Only used when checkpointing is enabled.
:param deserialization_schema: A deserializer used to convert between RabbitMQ's
messages and Flink's objects.
"""
JRMQSource = get_gateway().jvm.org.apache.flink.streaming.connectors.rabbitmq.RMQSource
j_rmq_source = JRMQSource(
connection_config._j_rmq_connection_config,
queue_name,
use_correlation_id,
deserialization_schema._j_deserialization_schema
)
super(RMQSource, self).__init__(source_func=j_rmq_source)
class RMQSink(SinkFunction):
def __init__(self, connection_config: 'RMQConnectionConfig',
queue_name: str, serialization_schema: SerializationSchema):
"""
Creates a new RabbitMQ sink.
:param connection_config: The RabbiMQ connection configuration.
:param queue_name: The queue to publish messages to.
:param serialization_schema: A serializer used to convert Flink objects to bytes.
"""
JRMQSink = get_gateway().jvm.org.apache.flink.streaming.connectors.rabbitmq.RMQSink
j_rmq_sink = JRMQSink(
connection_config._j_rmq_connection_config,
queue_name,
serialization_schema._j_serialization_schema,
)
super(RMQSink, self).__init__(sink_func=j_rmq_sink)
| 8,324 | 39.808824 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/hybrid_source.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from py4j.java_gateway import JavaObject
from pyflink.datastream.connectors import Source
from pyflink.java_gateway import get_gateway
__all__ = [
'HybridSource',
'HybridSourceBuilder'
]
class HybridSource(Source):
"""
Hybrid source that switches underlying sources based on configured source chain.
A simple example with FileSource and KafkaSource with fixed Kafka start position:
::
>>> file_source = FileSource \\
... .for_record_stream_format(StreamFormat.text_line_format(), test_dir) \\
... .build()
>>> kafka_source = KafkaSource \\
... .builder() \\
... .set_bootstrap_servers('localhost:9092') \\
... .set_group_id('MY_GROUP') \\
... .set_topics('quickstart-events') \\
... .set_value_only_deserializer(SimpleStringSchema()) \\
... .set_starting_offsets(KafkaOffsetsInitializer.earliest()) \\
... .build()
>>> hybrid_source = HybridSource.builder(file_source).add_source(kafka_source).build()
"""
def __init__(self, j_hybrid_source: JavaObject):
super(HybridSource, self).__init__(j_hybrid_source)
@staticmethod
def builder(first_source: Source) -> 'HybridSourceBuilder':
JHybridSource = get_gateway().jvm.org.apache.flink.connector.base.source.hybrid.HybridSource
return HybridSourceBuilder(JHybridSource.builder(first_source.get_java_function()))
class HybridSourceBuilder(object):
def __init__(self, j_hybrid_source_builder):
self._j_hybrid_source_builder = j_hybrid_source_builder
def add_source(self, source: Source) -> 'HybridSourceBuilder':
self._j_hybrid_source_builder.addSource(source.get_java_function())
return self
def build(self) -> 'HybridSource':
return HybridSource(self._j_hybrid_source_builder.build())
| 2,846 | 38.541667 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.connectors.base import Sink, Source, DeliveryGuarantee
__all__ = [
'Sink',
'Source',
'DeliveryGuarantee'
]
def _install():
from pyflink.datastream import connectors
# number_seq
from pyflink.datastream.connectors import number_seq
setattr(connectors, 'NumberSequenceSource', number_seq.NumberSequenceSource)
# jdbc
from pyflink.datastream.connectors import jdbc
setattr(connectors, 'JdbcSink', jdbc.JdbcSink)
setattr(connectors, 'JdbcConnectionOptions', jdbc.JdbcConnectionOptions)
setattr(connectors, 'JdbcExecutionOptions', jdbc.JdbcExecutionOptions)
# kafka
from pyflink.datastream.connectors import kafka
setattr(connectors, 'KafkaSource', kafka.KafkaSource)
setattr(connectors, 'FlinkKafkaConsumer', kafka.FlinkKafkaConsumer)
setattr(connectors, 'FlinkKafkaProducer', kafka.FlinkKafkaProducer)
setattr(connectors, 'Semantic', kafka.Semantic)
# pulsar
from pyflink.datastream.connectors import pulsar
setattr(connectors, 'PulsarSource', pulsar.PulsarSource)
setattr(connectors, 'PulsarSourceBuilder', pulsar.PulsarSourceBuilder)
setattr(connectors, 'StartCursor', pulsar.StartCursor)
setattr(connectors, 'StopCursor', pulsar.StopCursor)
# rabbitmq
from pyflink.datastream.connectors import rabbitmq
setattr(connectors, 'RMQSource', rabbitmq.RMQSource)
setattr(connectors, 'RMQSink', rabbitmq.RMQSink)
setattr(connectors, 'RMQConnectionConfig', rabbitmq.RMQConnectionConfig)
# filesystem
from pyflink.datastream.connectors import file_system
setattr(connectors, 'BucketAssigner', file_system.BucketAssigner)
setattr(connectors, 'FileEnumeratorProvider', file_system.FileEnumeratorProvider)
setattr(connectors, 'FileSink', file_system.FileSink)
setattr(connectors, 'FileSplitAssignerProvider', file_system.FileSplitAssignerProvider)
setattr(connectors, 'FileSource', file_system.FileSource)
setattr(connectors, 'FileSourceBuilder', file_system.FileSourceBuilder)
setattr(connectors, 'OutputFileConfig', file_system.OutputFileConfig)
setattr(connectors, 'RollingPolicy', file_system.RollingPolicy)
setattr(connectors, 'StreamFormat', file_system.StreamFormat)
setattr(connectors, 'StreamingFileSink', file_system.StreamingFileSink)
# for backward compatibility
_install()
del _install
| 3,344 | 41.884615 | 91 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_elasticsearch.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Types
from pyflink.datastream.connectors import DeliveryGuarantee
from pyflink.datastream.connectors.elasticsearch import Elasticsearch7SinkBuilder, \
FlushBackoffType, ElasticsearchEmitter
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase
from pyflink.util.java_utils import get_field_value, is_instance_of
class FlinkElasticsearch7Test(PyFlinkStreamingTestCase):
def test_es_sink(self):
ds = self.env.from_collection(
[{'name': 'ada', 'id': '1'}, {'name': 'luna', 'id': '2'}],
type_info=Types.MAP(Types.STRING(), Types.STRING()))
es_sink = Elasticsearch7SinkBuilder() \
.set_emitter(ElasticsearchEmitter.static_index('foo', 'id')) \
.set_hosts(['localhost:9200']) \
.set_delivery_guarantee(DeliveryGuarantee.AT_LEAST_ONCE) \
.set_bulk_flush_max_actions(1) \
.set_bulk_flush_max_size_mb(2) \
.set_bulk_flush_interval(1000) \
.set_bulk_flush_backoff_strategy(FlushBackoffType.CONSTANT, 3, 3000) \
.set_connection_username('foo') \
.set_connection_password('bar') \
.set_connection_path_prefix('foo-bar') \
.set_connection_request_timeout(30000) \
.set_connection_timeout(31000) \
.set_socket_timeout(32000) \
.build()
j_emitter = get_field_value(es_sink.get_java_function(), 'emitter')
self.assertTrue(
is_instance_of(
j_emitter,
'org.apache.flink.connector.elasticsearch.sink.MapElasticsearchEmitter'))
self.assertEqual(
get_field_value(
es_sink.get_java_function(), 'hosts')[0].toString(), 'http://localhost:9200')
self.assertEqual(
get_field_value(
es_sink.get_java_function(), 'deliveryGuarantee').toString(), 'at-least-once')
j_build_bulk_processor_config = get_field_value(
es_sink.get_java_function(), 'buildBulkProcessorConfig')
self.assertEqual(j_build_bulk_processor_config.getBulkFlushMaxActions(), 1)
self.assertEqual(j_build_bulk_processor_config.getBulkFlushMaxMb(), 2)
self.assertEqual(j_build_bulk_processor_config.getBulkFlushInterval(), 1000)
self.assertEqual(j_build_bulk_processor_config.getFlushBackoffType().toString(), 'CONSTANT')
self.assertEqual(j_build_bulk_processor_config.getBulkFlushBackoffRetries(), 3)
self.assertEqual(j_build_bulk_processor_config.getBulkFlushBackOffDelay(), 3000)
j_network_client_config = get_field_value(
es_sink.get_java_function(), 'networkClientConfig')
self.assertEqual(j_network_client_config.getUsername(), 'foo')
self.assertEqual(j_network_client_config.getPassword(), 'bar')
self.assertEqual(j_network_client_config.getConnectionRequestTimeout(), 30000)
self.assertEqual(j_network_client_config.getConnectionTimeout(), 31000)
self.assertEqual(j_network_client_config.getSocketTimeout(), 32000)
self.assertEqual(j_network_client_config.getConnectionPathPrefix(), 'foo-bar')
ds.sink_to(es_sink).name('es sink')
def test_es_sink_dynamic(self):
ds = self.env.from_collection(
[{'name': 'ada', 'id': '1'}, {'name': 'luna', 'id': '2'}],
type_info=Types.MAP(Types.STRING(), Types.STRING()))
es_dynamic_index_sink = Elasticsearch7SinkBuilder() \
.set_emitter(ElasticsearchEmitter.dynamic_index('name', 'id')) \
.set_hosts(['localhost:9200']) \
.build()
j_emitter = get_field_value(es_dynamic_index_sink.get_java_function(), 'emitter')
self.assertTrue(
is_instance_of(
j_emitter,
'org.apache.flink.connector.elasticsearch.sink.MapElasticsearchEmitter'))
ds.sink_to(es_dynamic_index_sink).name('es dynamic index sink')
def test_es_sink_key_none(self):
ds = self.env.from_collection(
[{'name': 'ada', 'id': '1'}, {'name': 'luna', 'id': '2'}],
type_info=Types.MAP(Types.STRING(), Types.STRING()))
es_sink = Elasticsearch7SinkBuilder() \
.set_emitter(ElasticsearchEmitter.static_index('foo')) \
.set_hosts(['localhost:9200']) \
.build()
j_emitter = get_field_value(es_sink.get_java_function(), 'emitter')
self.assertTrue(
is_instance_of(
j_emitter,
'org.apache.flink.connector.elasticsearch.sink.MapElasticsearchEmitter'))
ds.sink_to(es_sink).name('es sink')
def test_es_sink_dynamic_key_none(self):
ds = self.env.from_collection(
[{'name': 'ada', 'id': '1'}, {'name': 'luna', 'id': '2'}],
type_info=Types.MAP(Types.STRING(), Types.STRING()))
es_dynamic_index_sink = Elasticsearch7SinkBuilder() \
.set_emitter(ElasticsearchEmitter.dynamic_index('name')) \
.set_hosts(['localhost:9200']) \
.build()
j_emitter = get_field_value(es_dynamic_index_sink.get_java_function(), 'emitter')
self.assertTrue(
is_instance_of(
j_emitter,
'org.apache.flink.connector.elasticsearch.sink.MapElasticsearchEmitter'))
ds.sink_to(es_dynamic_index_sink).name('es dynamic index sink')
| 6,370 | 46.544776 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_rabbitmq.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Types
from pyflink.datastream.connectors.rabbitmq import RMQSink, RMQSource, RMQConnectionConfig
from pyflink.datastream.formats.json import JsonRowDeserializationSchema, JsonRowSerializationSchema
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase
from pyflink.util.java_utils import get_field_value
class RMQTest(PyFlinkStreamingTestCase):
def test_rabbitmq_connectors(self):
connection_config = RMQConnectionConfig.Builder() \
.set_host('localhost') \
.set_port(5672) \
.set_virtual_host('/') \
.set_user_name('guest') \
.set_password('guest') \
.build()
type_info = Types.ROW([Types.INT(), Types.STRING()])
deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(type_info=type_info).build()
rmq_source = RMQSource(
connection_config, 'source_queue', True, deserialization_schema)
self.assertEqual(
get_field_value(rmq_source.get_java_function(), 'queueName'), 'source_queue')
self.assertTrue(get_field_value(rmq_source.get_java_function(), 'usesCorrelationId'))
serialization_schema = JsonRowSerializationSchema.builder().with_type_info(type_info) \
.build()
rmq_sink = RMQSink(connection_config, 'sink_queue', serialization_schema)
self.assertEqual(
get_field_value(rmq_sink.get_java_function(), 'queueName'), 'sink_queue')
| 2,467 | 48.36 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_kafka.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
from typing import Dict
import pyflink.datastream.data_stream as data_stream
from pyflink.common import typeinfo
from pyflink.common.configuration import Configuration
from pyflink.common.serialization import SimpleStringSchema, DeserializationSchema
from pyflink.common.typeinfo import Types
from pyflink.common.types import Row
from pyflink.common.watermark_strategy import WatermarkStrategy
from pyflink.datastream.connectors.base import DeliveryGuarantee
from pyflink.datastream.connectors.kafka import KafkaSource, KafkaTopicPartition, \
KafkaOffsetsInitializer, KafkaOffsetResetStrategy, KafkaRecordSerializationSchema, KafkaSink, \
FlinkKafkaProducer, FlinkKafkaConsumer
from pyflink.datastream.formats.avro import AvroRowDeserializationSchema, AvroRowSerializationSchema
from pyflink.datastream.formats.csv import CsvRowDeserializationSchema, CsvRowSerializationSchema
from pyflink.datastream.formats.json import JsonRowDeserializationSchema, JsonRowSerializationSchema
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import (
PyFlinkStreamingTestCase,
PyFlinkTestCase,
invoke_java_object_method,
to_java_data_structure,
)
from pyflink.util.java_utils import to_jarray, is_instance_of, get_field_value
class KafkaSourceTests(PyFlinkStreamingTestCase):
def test_legacy_kafka_connector(self):
source_topic = 'test_source_topic'
sink_topic = 'test_sink_topic'
props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
type_info = Types.ROW([Types.INT(), Types.STRING()])
# Test for kafka consumer
deserialization_schema = JsonRowDeserializationSchema.builder() \
.type_info(type_info=type_info).build()
flink_kafka_consumer = FlinkKafkaConsumer(source_topic, deserialization_schema, props)
flink_kafka_consumer.set_start_from_earliest()
flink_kafka_consumer.set_commit_offsets_on_checkpoints(True)
j_properties = get_field_value(flink_kafka_consumer.get_java_function(), 'properties')
self.assertEqual('localhost:9092', j_properties.getProperty('bootstrap.servers'))
self.assertEqual('test_group', j_properties.getProperty('group.id'))
self.assertTrue(get_field_value(flink_kafka_consumer.get_java_function(),
'enableCommitOnCheckpoints'))
j_start_up_mode = get_field_value(flink_kafka_consumer.get_java_function(), 'startupMode')
j_deserializer = get_field_value(flink_kafka_consumer.get_java_function(), 'deserializer')
j_deserialize_type_info = invoke_java_object_method(j_deserializer, "getProducedType")
deserialize_type_info = typeinfo._from_java_type(j_deserialize_type_info)
self.assertTrue(deserialize_type_info == type_info)
self.assertTrue(j_start_up_mode.equals(get_gateway().jvm
.org.apache.flink.streaming.connectors
.kafka.config.StartupMode.EARLIEST))
j_topic_desc = get_field_value(flink_kafka_consumer.get_java_function(),
'topicsDescriptor')
j_topics = invoke_java_object_method(j_topic_desc, 'getFixedTopics')
self.assertEqual(['test_source_topic'], list(j_topics))
# Test for kafka producer
serialization_schema = JsonRowSerializationSchema.builder().with_type_info(type_info) \
.build()
flink_kafka_producer = FlinkKafkaProducer(sink_topic, serialization_schema, props)
flink_kafka_producer.set_write_timestamp_to_kafka(False)
j_producer_config = get_field_value(flink_kafka_producer.get_java_function(),
'producerConfig')
self.assertEqual('localhost:9092', j_producer_config.getProperty('bootstrap.servers'))
self.assertEqual('test_group', j_producer_config.getProperty('group.id'))
self.assertFalse(get_field_value(flink_kafka_producer.get_java_function(),
'writeTimestampToKafka'))
def test_compiling(self):
source = KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_topics('test_topic') \
.set_value_only_deserializer(SimpleStringSchema()) \
.build()
ds = self.env.from_source(source=source,
watermark_strategy=WatermarkStrategy.for_monotonous_timestamps(),
source_name='kafka source')
ds.print()
plan = json.loads(self.env.get_execution_plan())
self.assertEqual('Source: kafka source', plan['nodes'][0]['type'])
def test_set_properties(self):
source = KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_group_id('test_group_id') \
.set_client_id_prefix('test_client_id_prefix') \
.set_property('test_property', 'test_value') \
.set_topics('test_topic') \
.set_value_only_deserializer(SimpleStringSchema()) \
.build()
conf = self._get_kafka_source_configuration(source)
self.assertEqual(conf.get_string('bootstrap.servers', ''), 'localhost:9092')
self.assertEqual(conf.get_string('group.id', ''), 'test_group_id')
self.assertEqual(conf.get_string('client.id.prefix', ''), 'test_client_id_prefix')
self.assertEqual(conf.get_string('test_property', ''), 'test_value')
def test_set_topics(self):
source = KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_topics('test_topic1', 'test_topic2') \
.set_value_only_deserializer(SimpleStringSchema()) \
.build()
kafka_subscriber = get_field_value(source.get_java_function(), 'subscriber')
self.assertEqual(
kafka_subscriber.getClass().getCanonicalName(),
'org.apache.flink.connector.kafka.source.enumerator.subscriber.TopicListSubscriber'
)
topics = get_field_value(kafka_subscriber, 'topics')
self.assertTrue(is_instance_of(topics, get_gateway().jvm.java.util.List))
self.assertEqual(topics.size(), 2)
self.assertEqual(topics[0], 'test_topic1')
self.assertEqual(topics[1], 'test_topic2')
def test_set_topic_pattern(self):
source = KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_topic_pattern('test_topic*') \
.set_value_only_deserializer(SimpleStringSchema()) \
.build()
kafka_subscriber = get_field_value(source.get_java_function(), 'subscriber')
self.assertEqual(
kafka_subscriber.getClass().getCanonicalName(),
'org.apache.flink.connector.kafka.source.enumerator.subscriber.TopicPatternSubscriber'
)
topic_pattern = get_field_value(kafka_subscriber, 'topicPattern')
self.assertTrue(is_instance_of(topic_pattern, get_gateway().jvm.java.util.regex.Pattern))
self.assertEqual(topic_pattern.toString(), 'test_topic*')
def test_set_partitions(self):
topic_partition_1 = KafkaTopicPartition('test_topic', 1)
topic_partition_2 = KafkaTopicPartition('test_topic', 2)
source = KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_partitions({topic_partition_1, topic_partition_2}) \
.set_value_only_deserializer(SimpleStringSchema()) \
.build()
kafka_subscriber = get_field_value(source.get_java_function(), 'subscriber')
self.assertEqual(
kafka_subscriber.getClass().getCanonicalName(),
'org.apache.flink.connector.kafka.source.enumerator.subscriber.PartitionSetSubscriber'
)
partitions = get_field_value(kafka_subscriber, 'subscribedPartitions')
self.assertTrue(is_instance_of(partitions, get_gateway().jvm.java.util.Set))
self.assertTrue(topic_partition_1._to_j_topic_partition() in partitions)
self.assertTrue(topic_partition_2._to_j_topic_partition() in partitions)
def test_set_starting_offsets(self):
def _build_source(initializer: KafkaOffsetsInitializer):
return KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_topics('test_topic') \
.set_value_only_deserializer(SimpleStringSchema()) \
.set_group_id('test_group') \
.set_starting_offsets(initializer) \
.build()
self._check_reader_handled_offsets_initializer(
_build_source(KafkaOffsetsInitializer.latest()), -1, KafkaOffsetResetStrategy.LATEST
)
self._check_reader_handled_offsets_initializer(
_build_source(KafkaOffsetsInitializer.earliest()), -2,
KafkaOffsetResetStrategy.EARLIEST
)
self._check_reader_handled_offsets_initializer(
_build_source(KafkaOffsetsInitializer.committed_offsets()), -3,
KafkaOffsetResetStrategy.NONE
)
self._check_reader_handled_offsets_initializer(
_build_source(KafkaOffsetsInitializer.committed_offsets(
KafkaOffsetResetStrategy.LATEST
)), -3, KafkaOffsetResetStrategy.LATEST
)
self._check_timestamp_offsets_initializer(
_build_source(KafkaOffsetsInitializer.timestamp(100)), 100
)
specified_offsets = {
KafkaTopicPartition('test_topic1', 1): 1000,
KafkaTopicPartition('test_topic2', 2): 2000
}
self._check_specified_offsets_initializer(
_build_source(KafkaOffsetsInitializer.offsets(specified_offsets)), specified_offsets,
KafkaOffsetResetStrategy.EARLIEST
)
self._check_specified_offsets_initializer(
_build_source(KafkaOffsetsInitializer.offsets(
specified_offsets,
KafkaOffsetResetStrategy.LATEST
)),
specified_offsets,
KafkaOffsetResetStrategy.LATEST
)
def test_bounded(self):
def _build_source(initializer: KafkaOffsetsInitializer):
return KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_topics('test_topic') \
.set_value_only_deserializer(SimpleStringSchema()) \
.set_group_id('test_group') \
.set_bounded(initializer) \
.build()
def _check_bounded(source: KafkaSource):
self.assertEqual(
get_field_value(source.get_java_function(), 'boundedness').toString(), 'BOUNDED'
)
self._test_set_bounded_or_unbounded(_build_source, _check_bounded)
def test_unbounded(self):
def _build_source(initializer: KafkaOffsetsInitializer):
return KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_topics('test_topic') \
.set_value_only_deserializer(SimpleStringSchema()) \
.set_group_id('test_group') \
.set_unbounded(initializer) \
.build()
def _check_bounded(source: KafkaSource):
self.assertEqual(
get_field_value(source.get_java_function(), 'boundedness').toString(),
'CONTINUOUS_UNBOUNDED'
)
self._test_set_bounded_or_unbounded(_build_source, _check_bounded)
def _test_set_bounded_or_unbounded(self, _build_source, _check_boundedness):
source = _build_source(KafkaOffsetsInitializer.latest())
_check_boundedness(source)
self._check_reader_handled_offsets_initializer(
source, -1, KafkaOffsetResetStrategy.LATEST, False
)
source = _build_source(KafkaOffsetsInitializer.earliest())
_check_boundedness(source)
self._check_reader_handled_offsets_initializer(
source, -2, KafkaOffsetResetStrategy.EARLIEST, False
)
source = _build_source(KafkaOffsetsInitializer.committed_offsets())
_check_boundedness(source)
self._check_reader_handled_offsets_initializer(
source, -3, KafkaOffsetResetStrategy.NONE, False
)
source = _build_source(KafkaOffsetsInitializer.committed_offsets(
KafkaOffsetResetStrategy.LATEST
))
_check_boundedness(source)
self._check_reader_handled_offsets_initializer(
source, -3, KafkaOffsetResetStrategy.LATEST, False
)
source = _build_source(KafkaOffsetsInitializer.timestamp(100))
_check_boundedness(source)
self._check_timestamp_offsets_initializer(source, 100, False)
specified_offsets = {
KafkaTopicPartition('test_topic1', 1): 1000,
KafkaTopicPartition('test_topic2', 2): 2000
}
source = _build_source(KafkaOffsetsInitializer.offsets(specified_offsets))
_check_boundedness(source)
self._check_specified_offsets_initializer(
source, specified_offsets, KafkaOffsetResetStrategy.EARLIEST, False
)
source = _build_source(KafkaOffsetsInitializer.offsets(
specified_offsets,
KafkaOffsetResetStrategy.LATEST)
)
_check_boundedness(source)
self._check_specified_offsets_initializer(
source, specified_offsets, KafkaOffsetResetStrategy.LATEST, False
)
def test_set_value_only_deserializer(self):
def _check(schema: DeserializationSchema, class_name: str):
source = KafkaSource.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_topics('test_topic') \
.set_value_only_deserializer(schema) \
.build()
deserialization_schema_wrapper = get_field_value(source.get_java_function(),
'deserializationSchema')
self.assertEqual(
deserialization_schema_wrapper.getClass().getCanonicalName(),
'org.apache.flink.connector.kafka.source.reader.deserializer'
'.KafkaValueOnlyDeserializationSchemaWrapper'
)
deserialization_schema = get_field_value(deserialization_schema_wrapper,
'deserializationSchema')
self.assertEqual(deserialization_schema.getClass().getCanonicalName(),
class_name)
_check(SimpleStringSchema(), 'org.apache.flink.api.common.serialization.SimpleStringSchema')
_check(
JsonRowDeserializationSchema.builder().type_info(Types.ROW([Types.STRING()])).build(),
'org.apache.flink.formats.json.JsonRowDeserializationSchema'
)
_check(
CsvRowDeserializationSchema.Builder(Types.ROW([Types.STRING()])).build(),
'org.apache.flink.formats.csv.CsvRowDeserializationSchema'
)
avro_schema_string = """
{
"type": "record",
"name": "test_record",
"fields": []
}
"""
_check(
AvroRowDeserializationSchema(avro_schema_string=avro_schema_string),
'org.apache.flink.formats.avro.AvroRowDeserializationSchema'
)
def _check_reader_handled_offsets_initializer(self,
source: KafkaSource,
offset: int,
reset_strategy: KafkaOffsetResetStrategy,
is_start: bool = True):
if is_start:
field_name = 'startingOffsetsInitializer'
else:
field_name = 'stoppingOffsetsInitializer'
offsets_initializer = get_field_value(source.get_java_function(), field_name)
self.assertEqual(
offsets_initializer.getClass().getCanonicalName(),
'org.apache.flink.connector.kafka.source.enumerator.initializer'
'.ReaderHandledOffsetsInitializer'
)
starting_offset = get_field_value(offsets_initializer, 'startingOffset')
self.assertEqual(starting_offset, offset)
offset_reset_strategy = get_field_value(offsets_initializer, 'offsetResetStrategy')
self.assertTrue(
offset_reset_strategy.equals(reset_strategy._to_j_offset_reset_strategy())
)
def _check_timestamp_offsets_initializer(self,
source: KafkaSource,
timestamp: int,
is_start: bool = True):
if is_start:
field_name = 'startingOffsetsInitializer'
else:
field_name = 'stoppingOffsetsInitializer'
offsets_initializer = get_field_value(source.get_java_function(), field_name)
self.assertEqual(
offsets_initializer.getClass().getCanonicalName(),
'org.apache.flink.connector.kafka.source.enumerator.initializer'
'.TimestampOffsetsInitializer'
)
starting_timestamp = get_field_value(offsets_initializer, 'startingTimestamp')
self.assertEqual(starting_timestamp, timestamp)
def _check_specified_offsets_initializer(self,
source: KafkaSource,
offsets: Dict[KafkaTopicPartition, int],
reset_strategy: KafkaOffsetResetStrategy,
is_start: bool = True):
if is_start:
field_name = 'startingOffsetsInitializer'
else:
field_name = 'stoppingOffsetsInitializer'
offsets_initializer = get_field_value(source.get_java_function(), field_name)
self.assertEqual(
offsets_initializer.getClass().getCanonicalName(),
'org.apache.flink.connector.kafka.source.enumerator.initializer'
'.SpecifiedOffsetsInitializer'
)
initial_offsets = get_field_value(offsets_initializer, 'initialOffsets')
self.assertTrue(is_instance_of(initial_offsets, get_gateway().jvm.java.util.Map))
self.assertEqual(initial_offsets.size(), len(offsets))
for j_topic_partition in initial_offsets:
topic_partition = KafkaTopicPartition(j_topic_partition.topic(),
j_topic_partition.partition())
self.assertIsNotNone(offsets.get(topic_partition))
self.assertEqual(initial_offsets[j_topic_partition], offsets[topic_partition])
offset_reset_strategy = get_field_value(offsets_initializer, 'offsetResetStrategy')
self.assertTrue(
offset_reset_strategy.equals(reset_strategy._to_j_offset_reset_strategy())
)
@staticmethod
def _get_kafka_source_configuration(source: KafkaSource):
jvm = get_gateway().jvm
j_source = source.get_java_function()
j_to_configuration = j_source.getClass().getDeclaredMethod(
'getConfiguration', to_jarray(jvm.java.lang.Class, [])
)
j_to_configuration.setAccessible(True)
j_configuration = j_to_configuration.invoke(j_source, to_jarray(jvm.java.lang.Object, []))
return Configuration(j_configuration=j_configuration)
class KafkaSinkTests(PyFlinkStreamingTestCase):
def test_compile(self):
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_record_serializer(self._build_serialization_schema()) \
.build()
ds = self.env.from_collection([], type_info=Types.STRING())
ds.sink_to(sink)
plan = json.loads(self.env.get_execution_plan())
self.assertEqual(plan['nodes'][1]['type'], 'Sink: Writer')
self.assertEqual(plan['nodes'][2]['type'], 'Sink: Committer')
def test_set_bootstrap_severs(self):
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092,localhost:9093') \
.set_record_serializer(self._build_serialization_schema()) \
.build()
config = get_field_value(sink.get_java_function(), 'kafkaProducerConfig')
self.assertEqual(config.get('bootstrap.servers'), 'localhost:9092,localhost:9093')
def test_set_delivery_guarantee(self):
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_record_serializer(self._build_serialization_schema()) \
.build()
guarantee = get_field_value(sink.get_java_function(), 'deliveryGuarantee')
self.assertEqual(guarantee.toString(), 'none')
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_delivery_guarantee(DeliveryGuarantee.AT_LEAST_ONCE) \
.set_record_serializer(self._build_serialization_schema()) \
.build()
guarantee = get_field_value(sink.get_java_function(), 'deliveryGuarantee')
self.assertEqual(guarantee.toString(), 'at-least-once')
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_delivery_guarantee(DeliveryGuarantee.EXACTLY_ONCE) \
.set_record_serializer(self._build_serialization_schema()) \
.build()
guarantee = get_field_value(sink.get_java_function(), 'deliveryGuarantee')
self.assertEqual(guarantee.toString(), 'exactly-once')
def test_set_transactional_id_prefix(self):
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_transactional_id_prefix('test-prefix') \
.set_record_serializer(self._build_serialization_schema()) \
.build()
prefix = get_field_value(sink.get_java_function(), 'transactionalIdPrefix')
self.assertEqual(prefix, 'test-prefix')
def test_set_property(self):
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_record_serializer(self._build_serialization_schema()) \
.set_property('test-key', 'test-value') \
.build()
config = get_field_value(sink.get_java_function(), 'kafkaProducerConfig')
self.assertEqual(config.get('test-key'), 'test-value')
def test_set_record_serializer(self):
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_record_serializer(self._build_serialization_schema()) \
.build()
serializer = get_field_value(sink.get_java_function(), 'recordSerializer')
self.assertEqual(serializer.getClass().getCanonicalName(),
'org.apache.flink.connector.kafka.sink.'
'KafkaRecordSerializationSchemaBuilder.'
'KafkaRecordSerializationSchemaWrapper')
topic_selector = get_field_value(serializer, 'topicSelector')
self.assertEqual(topic_selector.apply(None), 'test-topic')
value_serializer = get_field_value(serializer, 'valueSerializationSchema')
self.assertEqual(value_serializer.getClass().getCanonicalName(),
'org.apache.flink.api.common.serialization.SimpleStringSchema')
@staticmethod
def _build_serialization_schema() -> KafkaRecordSerializationSchema:
return KafkaRecordSerializationSchema.builder() \
.set_topic('test-topic') \
.set_value_serialization_schema(SimpleStringSchema()) \
.build()
class KafkaRecordSerializationSchemaTests(PyFlinkTestCase):
def test_set_topic(self):
input_type = Types.ROW([Types.STRING()])
serialization_schema = KafkaRecordSerializationSchema.builder() \
.set_topic('test-topic') \
.set_value_serialization_schema(
JsonRowSerializationSchema.builder().with_type_info(input_type).build()) \
.build()
jvm = get_gateway().jvm
serialization_schema._j_serialization_schema.open(
jvm.org.apache.flink.connector.testutils.formats.DummyInitializationContext(),
jvm.org.apache.flink.connector.kafka.sink.DefaultKafkaSinkContext(
0, 1, jvm.java.util.Properties()))
j_record = serialization_schema._j_serialization_schema.serialize(
to_java_data_structure(Row('test')), None, None
)
self.assertEqual(j_record.topic(), 'test-topic')
self.assertIsNone(j_record.key())
self.assertEqual(j_record.value(), b'{"f0":"test"}')
def test_set_topic_selector(self):
def _select(data):
data = data[0]
if data == 'a':
return 'topic-a'
elif data == 'b':
return 'topic-b'
else:
return 'topic-dead-letter'
def _check_record(data, topic, serialized_data):
input_type = Types.ROW([Types.STRING()])
serialization_schema = KafkaRecordSerializationSchema.builder() \
.set_topic_selector(_select) \
.set_value_serialization_schema(
JsonRowSerializationSchema.builder().with_type_info(input_type).build()) \
.build()
jvm = get_gateway().jvm
serialization_schema._j_serialization_schema.open(
jvm.org.apache.flink.connector.testutils.formats.DummyInitializationContext(),
jvm.org.apache.flink.connector.kafka.sink.DefaultKafkaSinkContext(
0, 1, jvm.java.util.Properties()))
sink = KafkaSink.builder() \
.set_bootstrap_servers('localhost:9092') \
.set_record_serializer(serialization_schema) \
.build()
ds = MockDataStream(Types.ROW([Types.STRING()]))
ds.sink_to(sink)
row = Row(data)
topic_row = ds.feed(row) # type: Row
j_record = serialization_schema._j_serialization_schema.serialize(
to_java_data_structure(topic_row), None, None
)
self.assertEqual(j_record.topic(), topic)
self.assertIsNone(j_record.key())
self.assertEqual(j_record.value(), serialized_data)
_check_record('a', 'topic-a', b'{"f0":"a"}')
_check_record('b', 'topic-b', b'{"f0":"b"}')
_check_record('c', 'topic-dead-letter', b'{"f0":"c"}')
_check_record('d', 'topic-dead-letter', b'{"f0":"d"}')
def test_set_key_serialization_schema(self):
def _check_key_serialization_schema(key_serialization_schema, expected_class):
serialization_schema = KafkaRecordSerializationSchema.builder() \
.set_topic('test-topic') \
.set_key_serialization_schema(key_serialization_schema) \
.set_value_serialization_schema(SimpleStringSchema()) \
.build()
schema_field = get_field_value(serialization_schema._j_serialization_schema,
'keySerializationSchema')
self.assertIsNotNone(schema_field)
self.assertEqual(schema_field.getClass().getCanonicalName(), expected_class)
self._check_serialization_schema_implementations(_check_key_serialization_schema)
def test_set_value_serialization_schema(self):
def _check_value_serialization_schema(value_serialization_schema, expected_class):
serialization_schema = KafkaRecordSerializationSchema.builder() \
.set_topic('test-topic') \
.set_value_serialization_schema(value_serialization_schema) \
.build()
schema_field = get_field_value(serialization_schema._j_serialization_schema,
'valueSerializationSchema')
self.assertIsNotNone(schema_field)
self.assertEqual(schema_field.getClass().getCanonicalName(), expected_class)
self._check_serialization_schema_implementations(_check_value_serialization_schema)
@staticmethod
def _check_serialization_schema_implementations(check_function):
input_type = Types.ROW([Types.STRING()])
check_function(
JsonRowSerializationSchema.builder().with_type_info(input_type).build(),
'org.apache.flink.formats.json.JsonRowSerializationSchema'
)
check_function(
CsvRowSerializationSchema.Builder(input_type).build(),
'org.apache.flink.formats.csv.CsvRowSerializationSchema'
)
avro_schema_string = """
{
"type": "record",
"name": "test_record",
"fields": []
}
"""
check_function(
AvroRowSerializationSchema(avro_schema_string=avro_schema_string),
'org.apache.flink.formats.avro.AvroRowSerializationSchema'
)
check_function(
SimpleStringSchema(),
'org.apache.flink.api.common.serialization.SimpleStringSchema'
)
class MockDataStream(data_stream.DataStream):
def __init__(self, original_type=None):
super().__init__(None)
self._operators = []
self._type = original_type
def feed(self, data):
for op in self._operators:
data = op(data)
return data
def get_type(self):
return self._type
def map(self, f, output_type=None):
self._operators.append(f)
self._type = output_type
def sink_to(self, sink):
ds = self
from pyflink.datastream.connectors.base import SupportsPreprocessing
if isinstance(sink, SupportsPreprocessing) and sink.get_transformer() is not None:
ds = sink.get_transformer().apply(self)
return ds
| 31,052 | 46.121396 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_seq_source.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.connectors.number_seq import NumberSequenceSource
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase
from pyflink.util.java_utils import load_java_class
class SequenceSourceTests(PyFlinkStreamingTestCase):
def test_seq_source(self):
seq_source = NumberSequenceSource(1, 10)
seq_source_clz = load_java_class(
"org.apache.flink.api.connector.source.lib.NumberSequenceSource")
from_field = seq_source_clz.getDeclaredField("from")
from_field.setAccessible(True)
self.assertEqual(1, from_field.get(seq_source.get_java_function()))
to_field = seq_source_clz.getDeclaredField("to")
to_field.setAccessible(True)
self.assertEqual(10, to_field.get(seq_source.get_java_function()))
| 1,756 | 46.486486 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_jdbc.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Types
from pyflink.datastream.connectors.jdbc import JdbcSink, JdbcConnectionOptions, JdbcExecutionOptions
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase
from pyflink.util.java_utils import get_field_value
class FlinkJdbcSinkTest(PyFlinkStreamingTestCase):
def test_jdbc_sink(self):
ds = self.env.from_collection([('ab', 1), ('bdc', 2), ('cfgs', 3), ('deeefg', 4)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
jdbc_connection_options = JdbcConnectionOptions.JdbcConnectionOptionsBuilder()\
.with_driver_name('com.mysql.jdbc.Driver')\
.with_user_name('root')\
.with_password('password')\
.with_url('jdbc:mysql://server-name:server-port/database-name').build()
jdbc_execution_options = JdbcExecutionOptions.builder().with_batch_interval_ms(2000)\
.with_batch_size(100).with_max_retries(5).build()
jdbc_sink = JdbcSink.sink("insert into test table", ds.get_type(), jdbc_connection_options,
jdbc_execution_options)
ds.add_sink(jdbc_sink).name('jdbc sink')
plan = eval(self.env.get_execution_plan())
self.assertEqual('Sink: jdbc sink', plan['nodes'][1]['type'])
j_output_format = get_field_value(jdbc_sink.get_java_function(), 'outputFormat')
connection_options = JdbcConnectionOptions(
get_field_value(get_field_value(j_output_format, 'connectionProvider'),
'jdbcOptions'))
self.assertEqual(jdbc_connection_options.get_db_url(), connection_options.get_db_url())
self.assertEqual(jdbc_connection_options.get_driver_name(),
connection_options.get_driver_name())
self.assertEqual(jdbc_connection_options.get_password(), connection_options.get_password())
self.assertEqual(jdbc_connection_options.get_user_name(),
connection_options.get_user_name())
exec_options = JdbcExecutionOptions(get_field_value(j_output_format, 'executionOptions'))
self.assertEqual(jdbc_execution_options.get_batch_interval_ms(),
exec_options.get_batch_interval_ms())
self.assertEqual(jdbc_execution_options.get_batch_size(),
exec_options.get_batch_size())
self.assertEqual(jdbc_execution_options.get_max_retries(),
exec_options.get_max_retries())
| 3,474 | 55.048387 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_pulsar.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import WatermarkStrategy, SimpleStringSchema, Types, ConfigOptions, Duration
from pyflink.datastream.connectors import DeliveryGuarantee
from pyflink.datastream.connectors.pulsar import TopicRoutingMode, MessageDelayer, PulsarSink, \
PulsarSource, StartCursor, StopCursor, RangeGenerator
from pyflink.testing.test_case_utils import PyFlinkUTTestCase
from pyflink.util.java_utils import get_field_value, is_instance_of
class FlinkPulsarTest(PyFlinkUTTestCase):
def test_pulsar_source(self):
TEST_OPTION_NAME = 'pulsar.source.enableAutoAcknowledgeMessage'
pulsar_source = PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics('ada') \
.set_start_cursor(StartCursor.earliest()) \
.set_unbounded_stop_cursor(StopCursor.never()) \
.set_bounded_stop_cursor(StopCursor.at_publish_time(22)) \
.set_subscription_name('ff') \
.set_consumer_name('test_consumer') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_config(TEST_OPTION_NAME, True) \
.set_properties({'pulsar.source.autoCommitCursorInterval': '1000'}) \
.build()
ds = self.env.from_source(source=pulsar_source,
watermark_strategy=WatermarkStrategy.for_monotonous_timestamps(),
source_name="pulsar source")
ds.print()
plan = eval(self.env.get_execution_plan())
self.assertEqual('Source: pulsar source', plan['nodes'][0]['type'])
configuration = get_field_value(pulsar_source.get_java_function(), "sourceConfiguration")
self.assertEqual(
configuration.getString(
ConfigOptions.key('pulsar.client.serviceUrl')
.string_type()
.no_default_value()._j_config_option), 'pulsar://localhost:6650')
self.assertEqual(
configuration.getString(
ConfigOptions.key('pulsar.admin.adminUrl')
.string_type()
.no_default_value()._j_config_option), 'http://localhost:8080')
self.assertEqual(
configuration.getString(
ConfigOptions.key('pulsar.consumer.subscriptionName')
.string_type()
.no_default_value()._j_config_option), 'ff')
test_option = ConfigOptions.key(TEST_OPTION_NAME).boolean_type().no_default_value()
self.assertEqual(
configuration.getBoolean(
test_option._j_config_option), True)
self.assertEqual(
configuration.getLong(
ConfigOptions.key('pulsar.source.autoCommitCursorInterval')
.long_type()
.no_default_value()._j_config_option), 1000)
def test_source_set_topics_with_list(self):
PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics(['ada', 'beta']) \
.set_subscription_name('ff') \
.set_deserialization_schema(SimpleStringSchema()) \
.build()
def test_source_set_topics_pattern(self):
PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topic_pattern('ada.*') \
.set_subscription_name('ff') \
.set_deserialization_schema(SimpleStringSchema()) \
.build()
def test_source_deprecated_method(self):
test_option = ConfigOptions.key('pulsar.source.enableAutoAcknowledgeMessage') \
.boolean_type().no_default_value()
pulsar_source = PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topic_pattern('ada.*') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_unbounded_stop_cursor(StopCursor.at_publish_time(4444)) \
.set_subscription_name('ff') \
.set_config(test_option, True) \
.set_properties({'pulsar.source.autoCommitCursorInterval': '1000'}) \
.build()
configuration = get_field_value(pulsar_source.get_java_function(), "sourceConfiguration")
self.assertEqual(
configuration.getBoolean(
test_option._j_config_option), True)
self.assertEqual(
configuration.getLong(
ConfigOptions.key('pulsar.source.autoCommitCursorInterval')
.long_type()
.no_default_value()._j_config_option), 1000)
def test_stop_cursor_publish_time(self):
PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics('ada') \
.set_subscription_name('ff') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_start_cursor(StartCursor.from_publish_time(2)) \
.set_bounded_stop_cursor(StopCursor.at_publish_time(14)) \
.set_bounded_stop_cursor(StopCursor.after_publish_time(24)) \
.build()
def test_stop_cursor_event_time(self):
PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics('ada') \
.set_subscription_name('ff') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_bounded_stop_cursor(StopCursor.after_event_time(14)) \
.set_bounded_stop_cursor(StopCursor.at_event_time(24)) \
.build()
def test_set_range_generator(self):
PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics(['ada', 'beta']) \
.set_subscription_name('ff') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_range_generator(RangeGenerator.full()) \
.build()
PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics(['ada', 'beta']) \
.set_subscription_name('ff') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_range_generator(RangeGenerator.fixed_key(keys='k', key_bytes=bytearray(b'abc'))) \
.build()
def test_set_authentication(self):
PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics(['ada', 'beta']) \
.set_subscription_name('ff') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_authentication('test.class', 'key1:val1,key2:val2') \
.build()
PulsarSource.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics(['ada', 'beta']) \
.set_subscription_name('ff') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_authentication('test.class', {'k1': 'v1', 'k2': 'v2'}) \
.build()
def test_pulsar_sink(self):
ds = self.env.from_collection([('ab', 1), ('bdc', 2), ('cfgs', 3), ('deeefg', 4)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
TEST_OPTION_NAME = 'pulsar.producer.chunkingEnabled'
pulsar_sink = PulsarSink.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_producer_name('fo') \
.set_topics('ada') \
.set_serialization_schema(SimpleStringSchema()) \
.set_delivery_guarantee(DeliveryGuarantee.AT_LEAST_ONCE) \
.set_topic_routing_mode(TopicRoutingMode.ROUND_ROBIN) \
.delay_sending_message(MessageDelayer.fixed(Duration.of_seconds(12))) \
.set_config(TEST_OPTION_NAME, True) \
.set_properties({'pulsar.producer.batchingMaxMessages': '100'}) \
.build()
ds.sink_to(pulsar_sink).name('pulsar sink')
plan = eval(self.env.get_execution_plan())
self.assertEqual('pulsar sink: Writer', plan['nodes'][1]['type'])
configuration = get_field_value(pulsar_sink.get_java_function(), "sinkConfiguration")
self.assertEqual(
configuration.getString(
ConfigOptions.key('pulsar.client.serviceUrl')
.string_type()
.no_default_value()._j_config_option), 'pulsar://localhost:6650')
self.assertEqual(
configuration.getString(
ConfigOptions.key('pulsar.admin.adminUrl')
.string_type()
.no_default_value()._j_config_option), 'http://localhost:8080')
self.assertEqual(
configuration.getString(
ConfigOptions.key('pulsar.producer.producerName')
.string_type()
.no_default_value()._j_config_option), 'fo - %s')
j_pulsar_serialization_schema = get_field_value(
pulsar_sink.get_java_function(), 'serializationSchema')
j_serialization_schema = get_field_value(
j_pulsar_serialization_schema, 'serializationSchema')
self.assertTrue(
is_instance_of(
j_serialization_schema,
'org.apache.flink.api.common.serialization.SimpleStringSchema'))
self.assertEqual(
configuration.getString(
ConfigOptions.key('pulsar.sink.deliveryGuarantee')
.string_type()
.no_default_value()._j_config_option), 'at-least-once')
j_topic_router = get_field_value(pulsar_sink.get_java_function(), "topicRouter")
self.assertTrue(
is_instance_of(
j_topic_router,
'org.apache.flink.connector.pulsar.sink.writer.router.RoundRobinTopicRouter'))
j_message_delayer = get_field_value(pulsar_sink.get_java_function(), 'messageDelayer')
delay_duration = get_field_value(j_message_delayer, 'delayDuration')
self.assertEqual(delay_duration, 12000)
test_option = ConfigOptions.key(TEST_OPTION_NAME).boolean_type().no_default_value()
self.assertEqual(
configuration.getBoolean(
test_option._j_config_option), True)
self.assertEqual(
configuration.getLong(
ConfigOptions.key('pulsar.producer.batchingMaxMessages')
.long_type()
.no_default_value()._j_config_option), 100)
def test_sink_set_topics_with_list(self):
PulsarSink.builder() \
.set_service_url('pulsar://localhost:6650') \
.set_admin_url('http://localhost:8080') \
.set_topics(['ada', 'beta']) \
.set_serialization_schema(SimpleStringSchema()) \
.build()
| 12,226 | 46.026923 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_file_system.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Duration
from pyflink.common.serialization import Encoder
from pyflink.common.typeinfo import Types
from pyflink.datastream.connectors.file_system import FileCompactStrategy, FileCompactor, \
StreamingFileSink, OutputFileConfig, FileSource, StreamFormat, FileEnumeratorProvider, \
FileSplitAssignerProvider, RollingPolicy, FileSink, BucketAssigner
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase
from pyflink.util.java_utils import load_java_class
class FileSystemTests(PyFlinkStreamingTestCase):
def test_stream_file_sink(self):
self.env.set_parallelism(2)
ds = self.env.from_collection([('ab', 1), ('bdc', 2), ('cfgs', 3), ('deeefg', 4)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
ds.map(
lambda a: a[0],
Types.STRING()).add_sink(
StreamingFileSink.for_row_format(self.tempdir, Encoder.simple_string_encoder())
.with_rolling_policy(
RollingPolicy.default_rolling_policy(
part_size=1024 * 1024 * 1024,
rollover_interval=15 * 60 * 1000,
inactivity_interval=5 * 60 * 1000))
.with_output_file_config(
OutputFileConfig.OutputFileConfigBuilder()
.with_part_prefix("prefix")
.with_part_suffix("suffix").build()).build())
self.env.execute("test_streaming_file_sink")
results = []
import os
for root, dirs, files in os.walk(self.tempdir, topdown=True):
for file in files:
self.assertTrue(file.startswith('.prefix'))
self.assertTrue('suffix' in file)
path = root + "/" + file
with open(path) as infile:
for line in infile:
results.append(line)
expected = ['deeefg\n', 'bdc\n', 'ab\n', 'cfgs\n']
results.sort()
expected.sort()
self.assertEqual(expected, results)
def test_file_source(self):
stream_format = StreamFormat.text_line_format()
paths = ["/tmp/1.txt", "/tmp/2.txt"]
file_source_builder = FileSource.for_record_stream_format(stream_format, *paths)
file_source = file_source_builder\
.monitor_continuously(Duration.of_days(1)) \
.set_file_enumerator(FileEnumeratorProvider.default_splittable_file_enumerator()) \
.set_split_assigner(FileSplitAssignerProvider.locality_aware_split_assigner()) \
.build()
continuous_setting = file_source.get_java_function().getContinuousEnumerationSettings()
self.assertIsNotNone(continuous_setting)
self.assertEqual(Duration.of_days(1), Duration(continuous_setting.getDiscoveryInterval()))
input_paths_field = \
load_java_class("org.apache.flink.connector.file.src.AbstractFileSource"). \
getDeclaredField("inputPaths")
input_paths_field.setAccessible(True)
input_paths = input_paths_field.get(file_source.get_java_function())
self.assertEqual(len(input_paths), len(paths))
self.assertEqual(str(input_paths[0]), paths[0])
self.assertEqual(str(input_paths[1]), paths[1])
def test_file_sink(self):
base_path = "/tmp/1.txt"
encoder = Encoder.simple_string_encoder()
file_sink_builder = FileSink.for_row_format(base_path, encoder)
file_sink = file_sink_builder\
.with_bucket_check_interval(1000) \
.with_bucket_assigner(BucketAssigner.base_path_bucket_assigner()) \
.with_rolling_policy(RollingPolicy.on_checkpoint_rolling_policy()) \
.with_output_file_config(
OutputFileConfig.builder().with_part_prefix("pre").with_part_suffix("suf").build())\
.enable_compact(FileCompactStrategy.builder()
.enable_compaction_on_checkpoint(3)
.set_size_threshold(1024)
.set_num_compact_threads(2)
.build(),
FileCompactor.concat_file_compactor(b'\n')) \
.build()
buckets_builder_field = \
load_java_class("org.apache.flink.connector.file.sink.FileSink"). \
getDeclaredField("bucketsBuilder")
buckets_builder_field.setAccessible(True)
buckets_builder = buckets_builder_field.get(file_sink.get_java_function())
self.assertEqual("DefaultRowFormatBuilder", buckets_builder.getClass().getSimpleName())
row_format_builder_clz = load_java_class(
"org.apache.flink.connector.file.sink.FileSink$RowFormatBuilder")
encoder_field = row_format_builder_clz.getDeclaredField("encoder")
encoder_field.setAccessible(True)
self.assertEqual("SimpleStringEncoder",
encoder_field.get(buckets_builder).getClass().getSimpleName())
interval_field = row_format_builder_clz.getDeclaredField("bucketCheckInterval")
interval_field.setAccessible(True)
self.assertEqual(1000, interval_field.get(buckets_builder))
bucket_assigner_field = row_format_builder_clz.getDeclaredField("bucketAssigner")
bucket_assigner_field.setAccessible(True)
self.assertEqual("BasePathBucketAssigner",
bucket_assigner_field.get(buckets_builder).getClass().getSimpleName())
rolling_policy_field = row_format_builder_clz.getDeclaredField("rollingPolicy")
rolling_policy_field.setAccessible(True)
self.assertEqual("OnCheckpointRollingPolicy",
rolling_policy_field.get(buckets_builder).getClass().getSimpleName())
output_file_config_field = row_format_builder_clz.getDeclaredField("outputFileConfig")
output_file_config_field.setAccessible(True)
output_file_config = output_file_config_field.get(buckets_builder)
self.assertEqual("pre", output_file_config.getPartPrefix())
self.assertEqual("suf", output_file_config.getPartSuffix())
compact_strategy_field = row_format_builder_clz.getDeclaredField("compactStrategy")
compact_strategy_field.setAccessible(True)
compact_strategy = compact_strategy_field.get(buckets_builder)
self.assertEqual(3, compact_strategy.getNumCheckpointsBeforeCompaction())
self.assertEqual(1024, compact_strategy.getSizeThreshold())
self.assertEqual(2, compact_strategy.getNumCompactThreads())
file_compactor_field = row_format_builder_clz.getDeclaredField("fileCompactor")
file_compactor_field.setAccessible(True)
file_compactor = file_compactor_field.get(buckets_builder)
self.assertEqual("ConcatFileCompactor", file_compactor.getClass().getSimpleName())
concat_file_compactor_clz = load_java_class(
"org.apache.flink.connector.file.sink.compactor.ConcatFileCompactor"
)
file_delimiter_field = concat_file_compactor_clz.getDeclaredField("fileDelimiter")
file_delimiter_field.setAccessible(True)
file_delimiter = file_delimiter_field.get(file_compactor)
self.assertEqual(b'\n', file_delimiter)
| 8,262 | 50.006173 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_cassandra.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Types
from pyflink.datastream.connectors.cassandra import CassandraSink, MapperOptions, ConsistencyLevel
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase
class CassandraSinkTest(PyFlinkStreamingTestCase):
def test_cassandra_sink(self):
type_info = Types.ROW([Types.STRING(), Types.INT()])
ds = self.env.from_collection([('ab', 1), ('bdc', 2), ('cfgs', 3), ('deeefg', 4)],
type_info=type_info)
cassandra_sink_builder = CassandraSink.add_sink(ds)
cassandra_sink = cassandra_sink_builder\
.set_host('localhost', 9876) \
.set_query('query') \
.enable_ignore_null_fields() \
.set_mapper_options(MapperOptions()
.ttl(1)
.timestamp(100)
.tracing(True)
.if_not_exists(False)
.consistency_level(ConsistencyLevel.ANY)
.save_null_fields(True)) \
.set_max_concurrent_requests(1000) \
.build()
cassandra_sink.name('cassandra_sink').set_parallelism(3)
plan = eval(self.env.get_execution_plan())
self.assertEqual("Sink: cassandra_sink", plan['nodes'][1]['type'])
self.assertEqual(3, plan['nodes'][1]['parallelism'])
| 2,382 | 46.66 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/connectors/tests/test_kinesis.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import SimpleStringSchema, Types
from pyflink.datastream.connectors.kinesis import PartitionKeyGenerator, FlinkKinesisConsumer, \
KinesisStreamsSink, KinesisFirehoseSink
from pyflink.testing.test_case_utils import PyFlinkUTTestCase
from pyflink.util.java_utils import get_field_value
class FlinkKinesisTest(PyFlinkUTTestCase):
def test_kinesis_source(self):
consumer_config = {
'aws.region': 'us-east-1',
'aws.credentials.provider.basic.accesskeyid': 'aws_access_key_id',
'aws.credentials.provider.basic.secretkey': 'aws_secret_access_key',
'flink.stream.initpos': 'LATEST'
}
kinesis_source = FlinkKinesisConsumer("stream-1", SimpleStringSchema(), consumer_config)
ds = self.env.add_source(source_func=kinesis_source, source_name="kinesis source")
ds.print()
plan = eval(self.env.get_execution_plan())
self.assertEqual('Source: kinesis source', plan['nodes'][0]['type'])
self.assertEqual(
get_field_value(kinesis_source.get_java_function(), 'streams')[0], 'stream-1')
def test_kinesis_streams_sink(self):
sink_properties = {
'aws.region': 'us-east-1',
'aws.credentials.provider.basic.secretkey': 'aws_secret_access_key'
}
ds = self.env.from_collection([('ab', 1), ('bdc', 2), ('cfgs', 3), ('deeefg', 4)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
kinesis_streams_sink = KinesisStreamsSink.builder() \
.set_kinesis_client_properties(sink_properties) \
.set_serialization_schema(SimpleStringSchema()) \
.set_partition_key_generator(PartitionKeyGenerator.fixed()) \
.set_stream_name("stream-1") \
.set_fail_on_error(False) \
.set_max_batch_size(500) \
.set_max_in_flight_requests(50) \
.set_max_buffered_requests(10000) \
.set_max_batch_size_in_bytes(5 * 1024 * 1024) \
.set_max_time_in_buffer_ms(5000) \
.set_max_record_size_in_bytes(1 * 1024 * 1024) \
.build()
ds.sink_to(kinesis_streams_sink).name('kinesis streams sink')
plan = eval(self.env.get_execution_plan())
self.assertEqual('kinesis streams sink: Writer', plan['nodes'][1]['type'])
self.assertEqual(get_field_value(kinesis_streams_sink.get_java_function(), 'failOnError'),
False)
self.assertEqual(
get_field_value(kinesis_streams_sink.get_java_function(), 'streamName'), 'stream-1')
def test_kinesis_firehose_sink(self):
sink_properties = {
'aws.region': 'eu-west-1',
'aws.credentials.provider.basic.accesskeyid': 'aws_access_key_id',
'aws.credentials.provider.basic.secretkey': 'aws_secret_access_key'
}
ds = self.env.from_collection([('ab', 1), ('bdc', 2), ('cfgs', 3), ('deeefg', 4)],
type_info=Types.ROW([Types.STRING(), Types.INT()]))
kinesis_firehose_sink = KinesisFirehoseSink.builder() \
.set_firehose_client_properties(sink_properties) \
.set_serialization_schema(SimpleStringSchema()) \
.set_delivery_stream_name('stream-1') \
.set_fail_on_error(False) \
.set_max_batch_size(500) \
.set_max_in_flight_requests(50) \
.set_max_buffered_requests(10000) \
.set_max_batch_size_in_bytes(5 * 1024 * 1024) \
.set_max_time_in_buffer_ms(5000) \
.set_max_record_size_in_bytes(1 * 1024 * 1024) \
.build()
ds.sink_to(kinesis_firehose_sink).name('kinesis firehose sink')
plan = eval(self.env.get_execution_plan())
self.assertEqual('kinesis firehose sink: Writer', plan['nodes'][1]['type'])
self.assertEqual(get_field_value(kinesis_firehose_sink.get_java_function(), 'failOnError'),
False)
self.assertEqual(
get_field_value(kinesis_firehose_sink.get_java_function(), 'deliveryStreamName'),
'stream-1')
| 5,141 | 46.174312 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/testing/source_sink_utils.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
import glob
import os
import unittest
from py4j.java_gateway import java_import
from pyflink.find_flink_home import _find_flink_source_root
from pyflink.java_gateway import get_gateway
from pyflink.table.sinks import TableSink
from pyflink.table.types import _to_java_data_type
from pyflink.util import java_utils
class TestTableSink(TableSink):
"""
Base class for test table sink.
"""
_inited = False
def __init__(self, j_table_sink):
super(TestTableSink, self).__init__(j_table_sink)
@classmethod
def _ensure_initialized(cls):
if TestTableSink._inited:
return
FLINK_SOURCE_ROOT_DIR = _find_flink_source_root()
filename_pattern = (
"flink-python/target/flink-python*-tests.jar")
if not glob.glob(os.path.join(FLINK_SOURCE_ROOT_DIR, filename_pattern)):
raise unittest.SkipTest(
"'flink-python*-tests.jar' is not available. Will skip the related tests.")
gateway = get_gateway()
java_import(gateway.jvm, "org.apache.flink.table.utils.TestingSinks$TestAppendingSink")
java_import(gateway.jvm, "org.apache.flink.table.utils.TestingSinks$RowCollector")
TestTableSink._inited = True
class TestAppendSink(TestTableSink):
"""
A test append table sink.
"""
def __init__(self, field_names, field_types):
TestTableSink._ensure_initialized()
gateway = get_gateway()
j_field_names = java_utils.to_jarray(gateway.jvm.String, field_names)
j_field_types = java_utils.to_jarray(
gateway.jvm.DataType,
[_to_java_data_type(field_type) for field_type in field_types])
super(TestAppendSink, self).__init__(gateway.jvm.org.apache.flink.table.utils.TestingSinks
.TestAppendingSink(j_field_names, j_field_types))
class TestRetractSink(TestTableSink):
"""
A test retract table sink.
"""
def __init__(self, field_names, field_types):
TestTableSink._ensure_initialized()
gateway = get_gateway()
j_field_names = java_utils.to_jarray(gateway.jvm.String, field_names)
j_field_types = java_utils.to_jarray(
gateway.jvm.DataType,
[_to_java_data_type(field_type) for field_type in field_types])
super(TestRetractSink, self).__init__(gateway.jvm.org.apache.flink.table.utils.TestingSinks
.TestAppendingSink(j_field_names, j_field_types))
class TestUpsertSink(TestTableSink):
"""
A test upsert table sink.
"""
def __init__(self, field_names, field_types, keys, is_append_only):
TestTableSink._ensure_initialized()
gateway = get_gateway()
j_keys = gateway.new_array(gateway.jvm.String, len(keys))
for i in range(0, len(keys)):
j_keys[i] = keys[i]
super(TestUpsertSink, self).__init__(
gateway.jvm.TestUpsertSink(j_keys, is_append_only), field_names, field_types)
def results():
"""
Retrieves the results from an append table sink.
"""
return retract_results()
def retract_results():
"""
Retrieves the results from a retract table sink.
"""
gateway = get_gateway()
results = gateway.jvm.org.apache.flink.table.utils.TestingSinks.RowCollector.getAndClearValues()
return gateway.jvm\
.org.apache.flink.table.utils.TestingSinks.RowCollector.retractResults(results)
def upsert_results(keys):
"""
Retrieves the results from an upsert table sink.
"""
gateway = get_gateway()
j_keys = gateway.new_array(gateway.jvm.int, len(keys))
for i in range(0, len(keys)):
j_keys[i] = keys[i]
results = gateway.jvm.RowCollector.getAndClearValues()
return gateway.jvm.RowCollector.upsertResults(results, j_keys)
| 4,814 | 34.145985 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/testing/test_case_utils.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
import calendar
import datetime
import glob
import logging
import os
import re
import shutil
import sys
import tempfile
import time
import unittest
from abc import abstractmethod
from decimal import Decimal
from py4j.java_gateway import JavaObject
from pyflink.common import JobExecutionResult, Time, Instant, Row
from pyflink.datastream.execution_mode import RuntimeExecutionMode
from pyflink.datastream.stream_execution_environment import StreamExecutionEnvironment
from pyflink.find_flink_home import _find_flink_home, _find_flink_source_root
from pyflink.java_gateway import get_gateway
from pyflink.table.table_environment import StreamTableEnvironment
from pyflink.util.java_utils import add_jars_to_context_class_loader, to_jarray
if os.getenv("VERBOSE"):
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(stream=sys.stdout, level=log_level,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
def exec_insert_table(table, table_path) -> JobExecutionResult:
return table.execute_insert(table_path).get_job_client().get_job_execution_result().result()
def _load_specific_flink_module_jars(jars_relative_path):
flink_source_root = _find_flink_source_root()
jars_abs_path = flink_source_root + jars_relative_path
specific_jars = glob.glob(jars_abs_path + '/target/flink*.jar')
specific_jars = ['file://' + specific_jar for specific_jar in specific_jars]
add_jars_to_context_class_loader(specific_jars)
def invoke_java_object_method(obj, method_name):
clz = obj.getClass()
j_method = None
while clz is not None:
try:
j_method = clz.getDeclaredMethod(method_name, None)
if j_method is not None:
break
except:
clz = clz.getSuperclass()
if j_method is None:
raise Exception("No such method: " + method_name)
j_method.setAccessible(True)
return j_method.invoke(obj, to_jarray(get_gateway().jvm.Object, []))
class PyFlinkTestCase(unittest.TestCase):
"""
Base class for unit tests.
"""
@classmethod
def setUpClass(cls):
cls.tempdir = tempfile.mkdtemp()
os.environ["FLINK_TESTING"] = "1"
os.environ['_python_worker_execution_mode'] = "process"
_find_flink_home()
logging.info("Using %s as FLINK_HOME...", os.environ["FLINK_HOME"])
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tempdir, ignore_errors=True)
del os.environ['_python_worker_execution_mode']
@classmethod
def assert_equals(cls, actual, expected):
if isinstance(actual, JavaObject):
actual_py_list = cls.to_py_list(actual)
else:
actual_py_list = actual
actual_py_list.sort()
expected.sort()
assert len(actual_py_list) == len(expected)
assert all(x == y for x, y in zip(actual_py_list, expected))
@classmethod
def to_py_list(cls, actual):
py_list = []
for i in range(0, actual.size()):
py_list.append(actual.get(i))
return py_list
class PyFlinkITTestCase(PyFlinkTestCase):
@classmethod
def setUpClass(cls):
super(PyFlinkITTestCase, cls).setUpClass()
gateway = get_gateway()
MiniClusterResourceConfiguration = (
gateway.jvm.org.apache.flink.runtime.testutils.MiniClusterResourceConfiguration
.Builder()
.setNumberTaskManagers(8)
.setNumberSlotsPerTaskManager(1)
.setRpcServiceSharing(
get_gateway().jvm.org.apache.flink.runtime.minicluster.RpcServiceSharing.DEDICATED)
.withHaLeadershipControl()
.build())
cls.resource = (
get_gateway().jvm.org.apache.flink.test.util.
MiniClusterWithClientResource(MiniClusterResourceConfiguration))
cls.resource.before()
cls.env = StreamExecutionEnvironment(
get_gateway().jvm.org.apache.flink.streaming.util.TestStreamEnvironment(
cls.resource.getMiniCluster(), 2))
@classmethod
def tearDownClass(cls):
super(PyFlinkITTestCase, cls).tearDownClass()
cls.resource.after()
class PyFlinkUTTestCase(PyFlinkTestCase):
def setUp(self) -> None:
self.env = StreamExecutionEnvironment.get_execution_environment()
self.env.set_runtime_mode(RuntimeExecutionMode.STREAMING)
self.env.set_parallelism(2)
self.t_env = StreamTableEnvironment.create(self.env)
self.t_env.get_config().set("python.fn-execution.bundle.size", "1")
class PyFlinkStreamTableTestCase(PyFlinkITTestCase):
"""
Base class for table stream tests.
"""
@classmethod
def setUpClass(cls):
super(PyFlinkStreamTableTestCase, cls).setUpClass()
cls.env.set_runtime_mode(RuntimeExecutionMode.STREAMING)
cls.env.set_parallelism(2)
cls.t_env = StreamTableEnvironment.create(cls.env)
cls.t_env.get_config().set("python.fn-execution.bundle.size", "1")
class PyFlinkBatchTableTestCase(PyFlinkITTestCase):
"""
Base class for table batch tests.
"""
@classmethod
def setUpClass(cls):
super(PyFlinkBatchTableTestCase, cls).setUpClass()
cls.env.set_runtime_mode(RuntimeExecutionMode.BATCH)
cls.env.set_parallelism(2)
cls.t_env = StreamTableEnvironment.create(cls.env)
cls.t_env.get_config().set("python.fn-execution.bundle.size", "1")
class PyFlinkStreamingTestCase(PyFlinkITTestCase):
"""
Base class for streaming tests.
"""
@classmethod
def setUpClass(cls):
super(PyFlinkStreamingTestCase, cls).setUpClass()
cls.env.set_parallelism(2)
cls.env.set_runtime_mode(RuntimeExecutionMode.STREAMING)
class PyFlinkBatchTestCase(PyFlinkITTestCase):
"""
Base class for batch tests.
"""
@classmethod
def setUpClass(cls):
super(PyFlinkBatchTestCase, cls).setUpClass()
cls.env.set_parallelism(2)
cls.env.set_runtime_mode(RuntimeExecutionMode.BATCH)
class PythonAPICompletenessTestCase(object):
"""
Base class for Python API completeness tests, i.e.,
Python API should be aligned with the Java API as much as possible.
"""
@classmethod
def get_python_class_methods(cls, python_class):
return {cls.snake_to_camel(cls.java_method_name(method_name))
for method_name in dir(python_class) if not method_name.startswith('_')}
@staticmethod
def snake_to_camel(method_name):
output = ''.join(x.capitalize() or '_' for x in method_name.split('_'))
return output[0].lower() + output[1:]
@staticmethod
def get_java_class_methods(java_class):
gateway = get_gateway()
s = set()
method_arr = gateway.jvm.Class.forName(java_class).getMethods()
for i in range(0, len(method_arr)):
s.add(method_arr[i].getName())
return s
@classmethod
def check_methods(cls):
java_primary_methods = {'getClass', 'notifyAll', 'equals', 'hashCode', 'toString',
'notify', 'wait'}
java_methods = PythonAPICompletenessTestCase.get_java_class_methods(cls.java_class())
python_methods = cls.get_python_class_methods(cls.python_class())
missing_methods = java_methods - python_methods - cls.excluded_methods() \
- java_primary_methods
if len(missing_methods) > 0:
raise Exception('Methods: %s in Java class %s have not been added in Python class %s.'
% (missing_methods, cls.java_class(), cls.python_class()))
@classmethod
def java_method_name(cls, python_method_name):
"""
This method should be overwritten when the method name of the Python API cannot be
consistent with the Java API method name. e.g.: 'as' is python
keyword, so we use 'alias' in Python API corresponding 'as' in Java API.
:param python_method_name: Method name of Python API.
:return: The corresponding method name of Java API.
"""
return python_method_name
@classmethod
@abstractmethod
def python_class(cls):
"""
Return the Python class that needs to be compared. such as :class:`Table`.
"""
pass
@classmethod
@abstractmethod
def java_class(cls):
"""
Return the Java class that needs to be compared. such as `org.apache.flink.table.api.Table`.
"""
pass
@classmethod
def excluded_methods(cls):
"""
Exclude method names that do not need to be checked. When adding excluded methods
to the lists you should give a good reason in a comment.
:return:
"""
return {"equals", "hashCode", "toString"}
def test_completeness(self):
self.check_methods()
def replace_uuid(input_obj):
if isinstance(input_obj, str):
return re.sub(r'[a-z0-9]{8}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{4}-[a-z0-9]{12}',
'{uuid}', input_obj)
elif isinstance(input_obj, dict):
input_obj_copy = dict()
for key in input_obj:
input_obj_copy[replace_uuid(key)] = replace_uuid(input_obj[key])
return input_obj_copy
class Tuple2(object):
def __init__(self, f0, f1):
self.f0 = f0
self.f1 = f1
self.field = [f0, f1]
def getField(self, index):
return self.field[index]
class TestEnv(object):
def __init__(self):
self.result = []
def registerCachedFile(self, file_path, key):
self.result.append(Tuple2(key, file_path))
def getCachedFiles(self):
return self.result
def to_dict(self):
result = dict()
for item in self.result:
result[item.f0] = item.f1
return result
DATE_EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
TIME_EPOCH_ORDINAL = calendar.timegm(time.localtime(0)) * 10 ** 3
def _date_to_millis(d: datetime.date):
return (d.toordinal() - DATE_EPOCH_ORDINAL) * 86400 * 1000
def _time_to_millis(t: datetime.time):
if t.tzinfo is not None:
offset = t.utcoffset()
offset = offset if offset else datetime.timedelta()
offset_millis = \
(offset.days * 86400 + offset.seconds) * 10 ** 3 + offset.microseconds // 1000
else:
offset_millis = TIME_EPOCH_ORDINAL
minutes = t.hour * 60 + t.minute
seconds = minutes * 60 + t.second
return seconds * 10 ** 3 + t.microsecond // 1000 - offset_millis
def to_java_data_structure(value):
jvm = get_gateway().jvm
if isinstance(value, (int, float, str, bytes)):
return value
elif isinstance(value, Decimal):
return jvm.java.math.BigDecimal.valueOf(float(value))
elif isinstance(value, datetime.datetime):
if value.tzinfo is None:
return jvm.java.sql.Timestamp(
_date_to_millis(value.date()) + _time_to_millis(value.time())
)
return jvm.java.time.Instant.ofEpochMilli(
(
calendar.timegm(value.utctimetuple()) +
calendar.timegm(time.localtime(0))
) * 1000 +
value.microsecond // 1000
)
elif isinstance(value, datetime.date):
return jvm.java.sql.Date(_date_to_millis(value))
elif isinstance(value, datetime.time):
return jvm.java.sql.Time(_time_to_millis(value))
elif isinstance(value, Time):
return jvm.java.sql.Time(value.to_milliseconds())
elif isinstance(value, Instant):
return jvm.java.time.Instant.ofEpochMilli(value.to_epoch_milli())
elif isinstance(value, (list, tuple)):
j_list = jvm.java.util.ArrayList()
for i in value:
j_list.add(to_java_data_structure(i))
return j_list
elif isinstance(value, dict):
j_map = jvm.java.util.HashMap()
for k, v in value.items():
j_map.put(to_java_data_structure(k), to_java_data_structure(v))
return j_map
elif isinstance(value, Row):
if hasattr(value, '_fields'):
j_row = jvm.org.apache.flink.types.Row.withNames(value.get_row_kind().to_j_row_kind())
for field_name, value in zip(value._fields, value._values):
j_row.setField(field_name, to_java_data_structure(value))
else:
j_row = jvm.org.apache.flink.types.Row.withPositions(
value.get_row_kind().to_j_row_kind(), len(value)
)
for idx, value in enumerate(value._values):
j_row.setField(idx, to_java_data_structure(value))
return j_row
else:
raise TypeError('unsupported value type {}'.format(str(type(value))))
| 13,751 | 33.639798 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/testing/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
| 959 | 52.333333 | 81 |
py
|
flink
|
flink-master/flink-python/pyflink/common/job_id.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
__all__ = ['JobID']
class JobID(object):
"""
Unique (at least statistically unique) identifier for a Flink Job. Jobs in Flink correspond
to dataflow graphs.
Jobs act simultaneously as sessions, because jobs can be created and submitted incrementally
in different parts. Newer fragments of a graph can be attached to existing graphs, thereby
extending the current data flow graphs.
.. versionadded:: 1.11.0
"""
def __init__(self, j_job_id):
self._j_job_id = j_job_id
def __str__(self):
return self._j_job_id.toString()
| 1,538 | 39.5 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/common/job_status.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from pyflink.java_gateway import get_gateway
__all__ = ['JobStatus']
class JobStatus(Enum):
"""
Possible states of a job once it has been accepted by the job manager.
:data:`CREATED`:
Job is newly created, no task has started to run.
:data:`RUNNING`:
Some tasks are scheduled or running, some may be pending, some may be finished.
:data:`FAILING`:
The job has failed and is currently waiting for the cleanup to complete.
:data:`FAILED`:
The job has failed with a non-recoverable task failure.
:data:`CANCELLING`:
Job is being cancelled.
:data:`CANCELED`:
Job has been cancelled.
:data:`FINISHED`:
All of the job's tasks have successfully finished.
:data:`RESTARTING`:
The job is currently undergoing a reset and total restart.
:data:`SUSPENDED`:
The job has been suspended which means that it has been stopped but not been removed from a
potential HA job store.
:data:`RECONCILING`:
The job is currently reconciling and waits for task execution report to recover state.
.. versionadded:: 1.11.0
"""
CREATED = 0
RUNNING = 1
FAILING = 2
FAILED = 3
CANCELLING = 4
CANCELED = 5
FINISHED = 6
RESTARTING = 7
SUSPENDED = 8
RECONCILING = 9
def is_globally_terminal_state(self) -> bool:
"""
Checks whether this state is <i>globally terminal</i>. A globally terminal job
is complete and cannot fail any more and will not be restarted or recovered by another
standby master node.
When a globally terminal state has been reached, all recovery data for the job is
dropped from the high-availability services.
:return: ``True`` if this job status is globally terminal, ``False`` otherwise.
.. versionadded:: 1.11.0
"""
return self._to_j_job_status().isGloballyTerminalState()
def is_terminal_state(self) -> bool:
"""
Checks whether this state is locally terminal. Locally terminal refers to the
state of a job's execution graph within an executing JobManager. If the execution graph
is locally terminal, the JobManager will not continue executing or recovering the job.
The only state that is locally terminal, but not globally terminal is SUSPENDED,
which is typically entered when the executing JobManager loses its leader status.
:return: ``True`` if this job status is terminal, ``False`` otherwise.
.. versionadded:: 1.11.0
"""
return self._to_j_job_status().isTerminalState()
@staticmethod
def _from_j_job_status(j_job_status) -> 'JobStatus':
return JobStatus[j_job_status.name()]
def _to_j_job_status(self):
gateway = get_gateway()
JJobStatus = gateway.jvm.org.apache.flink.api.common.JobStatus
return getattr(JJobStatus, self.name)
| 3,894 | 30.92623 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/common/execution_mode.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from pyflink.java_gateway import get_gateway
__all__ = ['ExecutionMode']
class ExecutionMode(Enum):
"""
The execution mode specifies how a batch program is executed in terms
of data exchange: pipelining or batched.
:data:`PIPELINED`:
Executes the program in a pipelined fashion (including shuffles and broadcasts),
except for data exchanges that are susceptible to deadlocks when pipelining.
These data exchanges are performed in a batch manner.
An example of situations that are susceptible to deadlocks (when executed in a
pipelined manner) are data flows that branch (one data set consumed by multiple
operations) and re-join later.
:data:`PIPELINED_FORCED`:
Executes the program in a pipelined fashion (including shuffles and broadcasts),
**including** data exchanges that are susceptible to deadlocks when
executed via pipelining.
Usually, PIPELINED is the preferable option, which pipelines most
data exchanges and only uses batch data exchanges in situations that are
susceptible to deadlocks.
This option should only be used with care and only in situations where the
programmer is sure that the program is safe for full pipelining and that
Flink was too conservative when choosing the batch exchange at a certain
point.
:data:`BATCH`:
This mode executes all shuffles and broadcasts in a batch fashion, while
pipelining data between operations that exchange data only locally
between one producer and one consumer.
:data:`BATCH_FORCED`:
This mode executes the program in a strict batch way, including all points
where data is forwarded locally from one producer to one consumer. This mode
is typically more expensive to execute than the BATCH mode. It does
guarantee that no successive operations are ever executed concurrently.
"""
PIPELINED = 0
PIPELINED_FORCED = 1
BATCH = 2
BATCH_FORCED = 3
@staticmethod
def _from_j_execution_mode(j_execution_mode) -> 'ExecutionMode':
return ExecutionMode[j_execution_mode.name()]
def _to_j_execution_mode(self):
gateway = get_gateway()
JExecutionMode = gateway.jvm.org.apache.flink.api.common.ExecutionMode
return getattr(JExecutionMode, self.name)
| 3,280 | 38.53012 | 84 |
py
|
flink
|
flink-master/flink-python/pyflink/common/config_options.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import TypeVar, Generic
from pyflink.java_gateway import get_gateway
T = TypeVar('T')
__all__ = ['ConfigOptions', 'ConfigOption']
class ConfigOptions(object):
"""
{@code ConfigOptions} are used to build a :class:`~pyflink.common.ConfigOption`. The option is
typically built in one of the following patterns:
Example:
::
# simple string-valued option with a default value
>>> ConfigOptions.key("tmp.dir").string_type().default_value("/tmp")
# simple integer-valued option with a default value
>>> ConfigOptions.key("application.parallelism").int_type().default_value(100)
# option with no default value
>>> ConfigOptions.key("user.name").string_type().no_default_value()
"""
def __init__(self, j_config_options):
self._j_config_options = j_config_options
@staticmethod
def key(key: str):
"""
Starts building a new ConfigOption.
:param key: The key for the config option.
:return: The builder for the config option with the given key.
"""
gateway = get_gateway()
j_option_builder = gateway.jvm.org.apache.flink.configuration.ConfigOptions.key(key)
return ConfigOptions.OptionBuilder(j_option_builder)
class OptionBuilder(object):
def __init__(self, j_option_builder):
self._j_option_builder = j_option_builder
def boolean_type(self) -> 'ConfigOptions.TypedConfigOptionBuilder[bool]':
"""
Defines that the value of the option should be of bool type.
"""
return ConfigOptions.TypedConfigOptionBuilder(self._j_option_builder.booleanType())
def int_type(self) -> 'ConfigOptions.TypedConfigOptionBuilder[int]':
"""
Defines that the value of the option should be of int type
(from -2,147,483,648 to 2,147,483,647).
"""
return ConfigOptions.TypedConfigOptionBuilder(self._j_option_builder.intType())
def long_type(self) -> 'ConfigOptions.TypedConfigOptionBuilder[int]':
"""
Defines that the value of the option should be of int type
(from -9,223,372,036,854,775,808 to 9,223,372,036,854,775,807).
"""
return ConfigOptions.TypedConfigOptionBuilder(self._j_option_builder.longType())
def float_type(self) -> 'ConfigOptions.TypedConfigOptionBuilder[float]':
"""
Defines that the value of the option should be of float type
(4-byte single precision floating point number).
"""
return ConfigOptions.TypedConfigOptionBuilder(self._j_option_builder.floatType())
def double_type(self) -> 'ConfigOptions.TypedConfigOptionBuilder[float]':
"""
Defines that the value of the option should be of float Double} type
(8-byte double precision floating point number).
"""
return ConfigOptions.TypedConfigOptionBuilder(self._j_option_builder.doubleType())
def string_type(self) -> 'ConfigOptions.TypedConfigOptionBuilder[str]':
"""
Defines that the value of the option should be of str type.
"""
return ConfigOptions.TypedConfigOptionBuilder(self._j_option_builder.stringType())
class TypedConfigOptionBuilder(Generic[T]):
def __init__(self, j_typed_config_option_builder):
self._j_typed_config_option_builder = j_typed_config_option_builder
def default_value(self, value: T) -> 'ConfigOption[T]':
return ConfigOption(self._j_typed_config_option_builder.defaultValue(value))
def no_default_value(self) -> 'ConfigOption[str]':
return ConfigOption(self._j_typed_config_option_builder.noDefaultValue())
class ConfigOption(Generic[T]):
"""
A {@code ConfigOption} describes a configuration parameter. It encapsulates the configuration
key, deprecated older versions of the key, and an optional default value for the configuration
parameter.
{@code ConfigOptions} are built via the ConfigOptions class. Once created, a config
option is immutable.
"""
def __init__(self, j_config_option):
self._j_config_option = j_config_option
| 5,257 | 41.064 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/common/constants.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
A constant holding the maximum value a long can have, 2^63 – 1.
"""
MAX_LONG_VALUE = 0x7fffffffffffffff
"""
A constant holding the minimum value a long can have, -2^63
"""
MIN_LONG_VALUE = - MAX_LONG_VALUE - 1
"""
Output tag for main output.
"""
DEFAULT_OUTPUT_TAG = ""
| 1,234 | 37.59375 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/common/completable_future.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from py4j.protocol import Py4JJavaError
from pyflink.util.exceptions import convert_py4j_exception
__all__ = ['CompletableFuture']
class CompletableFuture(object):
"""
A Future that may be explicitly completed (setting its value and status), supporting dependent
functions and actions that trigger upon its completion.
When two or more threads attempt to set_result, set_exception, or cancel a CompletableFuture,
only one of them succeeds.
.. versionadded:: 1.11.0
"""
def __init__(self, j_completable_future, py_class=None):
self._j_completable_future = j_completable_future
self._py_class = py_class
def cancel(self) -> bool:
"""
Completes this CompletableFuture if not already completed.
:return: true if this task is now cancelled
.. versionadded:: 1.11.0
"""
return self._j_completable_future.cancel(True)
def cancelled(self) -> bool:
"""
Returns true if this CompletableFuture was cancelled before it completed normally.
.. versionadded:: 1.11.0
"""
return self._j_completable_future.isCancelled()
def done(self) -> bool:
"""
Returns true if completed in any fashion: normally, exceptionally, or via cancellation.
.. versionadded:: 1.11.0
"""
return self._j_completable_future.isDone()
def result(self):
"""
Waits if necessary for this future to complete, and then returns its result.
:return: the result value
.. versionadded:: 1.11.0
"""
if self._py_class is None:
return self._j_completable_future.get()
else:
return self._py_class(self._j_completable_future.get())
def exception(self):
"""
Returns the exception that was set on this future or None if no exception was set.
.. versionadded:: 1.11.0
"""
if self._j_completable_future.isCompletedExceptionally():
try:
self._j_completable_future.getNow(None)
except Py4JJavaError as e:
return convert_py4j_exception(e)
else:
return None
def __str__(self):
return self._j_completable_future.toString()
| 3,240 | 32.760417 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/common/utils.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
class JavaObjectWrapper(object):
def __init__(self, j_object):
self._j_object = j_object
def get_java_object(self):
return self._j_object
| 1,123 | 42.230769 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/common/serializer.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import abstractmethod, ABC
from io import BytesIO
from typing import TypeVar, Generic
T = TypeVar('T')
__all__ = ['TypeSerializer']
class TypeSerializer(ABC, Generic[T]):
"""
This interface describes the methods that are required for a data type to be handled by the
Flink runtime. Specifically, this interface contains the serialization and deserialization
methods.
"""
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
@abstractmethod
def serialize(self, element: T, stream: BytesIO) -> None:
"""
Serializes an element to the output stream.
"""
pass
@abstractmethod
def deserialize(self, stream: BytesIO) -> T:
"""
Returns a deserialized element from the input stream.
"""
pass
def _get_coder(self):
serialize_func = self.serialize
deserialize_func = self.deserialize
class CoderAdapter(object):
def get_impl(self):
return CoderAdapterIml()
class CoderAdapterIml(object):
def encode_nested(self, element):
bytes_io = BytesIO()
serialize_func(element, bytes_io)
return bytes_io.getvalue()
def decode_nested(self, bytes_data):
bytes_io = BytesIO(bytes_data)
return deserialize_func(bytes_io)
return CoderAdapter()
void = b''
class VoidNamespaceSerializer(TypeSerializer[bytes]):
def serialize(self, element: bytes, stream: BytesIO) -> None:
pass
def deserialize(self, stream: BytesIO) -> bytes:
return void
| 2,855 | 29.709677 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/common/configuration.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Set, Dict
from py4j.java_gateway import JavaObject
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import add_jars_to_context_class_loader
class Configuration:
"""
Lightweight configuration object which stores key/value pairs.
"""
def __init__(self, other: 'Configuration' = None, j_configuration: JavaObject = None):
"""
Creates a new configuration.
:param other: Optional, if this parameter exists, creates a new configuration with a
copy of the given configuration.
:param j_configuration: Optional, the py4j java configuration object, if this parameter
exists, creates a wrapper for it.
"""
if j_configuration is not None:
self._j_configuration = j_configuration
else:
gateway = get_gateway()
JConfiguration = gateway.jvm.org.apache.flink.configuration.Configuration
if other is not None:
self._j_configuration = JConfiguration(other._j_configuration)
else:
self._j_configuration = JConfiguration()
def get_string(self, key: str, default_value: str) -> str:
"""
Returns the value associated with the given key as a string.
:param key: The key pointing to the associated value.
:param default_value: The default value which is returned in case there is no value
associated with the given key.
:return: The (default) value associated with the given key.
"""
return self._j_configuration.getString(key, default_value)
def set_string(self, key: str, value: str) -> 'Configuration':
"""
Adds the given key/value pair to the configuration object.
:param key: The key of the key/value pair to be added.
:param value: The value of the key/value pair to be added.
"""
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
if key in [jars_key, classpaths_key]:
add_jars_to_context_class_loader(value.split(";"))
self._j_configuration.setString(key, value)
return self
def get_integer(self, key: str, default_value: int) -> int:
"""
Returns the value associated with the given key as an integer.
:param key: The key pointing to the associated value.
:param default_value: The default value which is returned in case there is no value
associated with the given key.
:return: The (default) value associated with the given key.
"""
return self._j_configuration.getLong(key, default_value)
def set_integer(self, key: str, value: int) -> 'Configuration':
"""
Adds the given key/value pair to the configuration object.
:param key: The key of the key/value pair to be added.
:param value: The value of the key/value pair to be added.
"""
self._j_configuration.setLong(key, value)
return self
def get_boolean(self, key: str, default_value: bool) -> bool:
"""
Returns the value associated with the given key as a boolean.
:param key: The key pointing to the associated value.
:param default_value: The default value which is returned in case there is no value
associated with the given key.
:return: The (default) value associated with the given key.
"""
return self._j_configuration.getBoolean(key, default_value)
def set_boolean(self, key: str, value: bool) -> 'Configuration':
"""
Adds the given key/value pair to the configuration object.
:param key: The key of the key/value pair to be added.
:param value: The value of the key/value pair to be added.
"""
self._j_configuration.setBoolean(key, value)
return self
def get_float(self, key: str, default_value: float) -> float:
"""
Returns the value associated with the given key as a float.
:param key: The key pointing to the associated value.
:param default_value: The default value which is returned in case there is no value
associated with the given key.
:return: The (default) value associated with the given key.
"""
return self._j_configuration.getDouble(key, float(default_value))
def set_float(self, key: str, value: float) -> 'Configuration':
"""
Adds the given key/value pair to the configuration object.
:param key: The key of the key/value pair to be added.
:param value: The value of the key/value pair to be added.
"""
self._j_configuration.setDouble(key, float(value))
return self
def get_bytearray(self, key: str, default_value: bytearray) -> bytearray:
"""
Returns the value associated with the given key as a byte array.
:param key: The key pointing to the associated value.
:param default_value: The default value which is returned in case there is no value
associated with the given key.
:return: The (default) value associated with the given key.
"""
return bytearray(self._j_configuration.getBytes(key, default_value))
def set_bytearray(self, key: str, value: bytearray) -> 'Configuration':
"""
Adds the given byte array to the configuration object.
:param key: The key under which the bytes are added.
:param value: The byte array to be added.
"""
self._j_configuration.setBytes(key, value)
return self
def key_set(self) -> Set[str]:
"""
Returns the keys of all key/value pairs stored inside this configuration object.
:return: The keys of all key/value pairs stored inside this configuration object.
"""
return set(self._j_configuration.keySet())
def add_all_to_dict(self, target_dict: Dict):
"""
Adds all entries in this configuration to the given dict.
:param target_dict: The dict to be updated.
"""
properties = get_gateway().jvm.java.util.Properties()
self._j_configuration.addAllToProperties(properties)
target_dict.update(properties)
def add_all(self, other: 'Configuration', prefix: str = None) -> 'Configuration':
"""
Adds all entries from the given configuration into this configuration. The keys are
prepended with the given prefix if exist.
:param other: The configuration whose entries are added to this configuration.
:param prefix: Optional, the prefix to prepend.
"""
if prefix is None:
self._j_configuration.addAll(other._j_configuration)
else:
self._j_configuration.addAll(other._j_configuration, prefix)
return self
def contains_key(self, key: str) -> bool:
"""
Checks whether there is an entry with the specified key.
:param key: Key of entry.
:return: True if the key is stored, false otherwise.
"""
return self._j_configuration.containsKey(key)
def to_dict(self) -> Dict[str, str]:
"""
Converts the configuration into a dict representation of string key-pair.
:return: Dict representation of the configuration.
"""
return dict(self._j_configuration.toMap())
def remove_config(self, key: str) -> bool:
"""
Removes given config key from the configuration.
:param key: The config key to remove.
:return: True if config has been removed, false otherwise.
"""
gateway = get_gateway()
JConfigOptions = gateway.jvm.org.apache.flink.configuration.ConfigOptions
config_option = JConfigOptions.key(key).noDefaultValue()
return self._j_configuration.removeConfig(config_option)
def __deepcopy__(self, memodict=None):
return Configuration(j_configuration=self._j_configuration.clone())
def __hash__(self):
return self._j_configuration.hashCode()
def __eq__(self, other):
if isinstance(other, Configuration):
return self._j_configuration.equals(other._j_configuration)
else:
return False
def __str__(self):
return self._j_configuration.toString()
| 9,560 | 39.685106 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/common/types.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from typing import List
from pyflink.java_gateway import get_gateway
__all__ = ['Row', 'RowKind']
class RowKind(Enum):
INSERT = 0
UPDATE_BEFORE = 1
UPDATE_AFTER = 2
DELETE = 3
def __str__(self):
if self.value == RowKind.INSERT.value:
return '+I'
elif self.value == RowKind.UPDATE_BEFORE.value:
return '-U'
elif self.value == RowKind.UPDATE_AFTER.value:
return '+U'
else:
return '-D'
def to_j_row_kind(self):
JRowKind = get_gateway().jvm.org.apache.flink.types.RowKind
return getattr(JRowKind, self.name)
def _create_row(fields, values, row_kind: RowKind = None):
row = Row(*values)
if fields is not None:
row._fields = fields
if row_kind is not None:
row.set_row_kind(row_kind)
return row
class Row(object):
"""
A row in Table.
The fields in it can be accessed:
* like attributes (``row.key``)
* like dictionary values (``row[key]``)
``key in row`` will search through row keys.
Row can be used to create a row object by using named arguments,
the fields will be sorted by names. It is not allowed to omit
a named argument to represent the value is None or missing. This should be
explicitly set to None in this case.
::
>>> row = Row(name="Alice", age=11)
>>> row
Row(age=11, name='Alice')
>>> row['name'], row['age']
('Alice', 11)
>>> row.name, row.age
('Alice', 11)
>>> 'name' in row
True
>>> 'wrong_key' in row
False
Row can also be used to create another Row like class, then it
could be used to create Row objects, such as
::
>>> Person = Row("name", "age")
>>> Person
<Row(name, age)>
>>> 'name' in Person
True
>>> 'wrong_key' in Person
False
>>> Person("Alice", 11)
Row(name='Alice', age=11)
"""
def __init__(self, *args, **kwargs):
if args and kwargs:
raise ValueError("Can not use both args "
"and kwargs to create Row")
if kwargs:
self._fields = list(kwargs.keys())
self._values = [kwargs[n] for n in self._fields]
self._from_dict = True
else:
self._values = list(args)
self._row_kind = RowKind.INSERT
def as_dict(self, recursive=False):
"""
Returns as a dict.
Example:
::
>>> Row(name="Alice", age=11).as_dict() == {'name': 'Alice', 'age': 11}
True
>>> row = Row(key=1, value=Row(name='a', age=2))
>>> row.as_dict() == {'key': 1, 'value': Row(age=2, name='a')}
True
>>> row.as_dict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
True
:param recursive: turns the nested Row as dict (default: False).
"""
if not hasattr(self, "_fields"):
raise TypeError("Cannot convert a Row class into dict")
if recursive:
def conv(obj):
if isinstance(obj, Row):
return obj.as_dict(True)
elif isinstance(obj, list):
return [conv(o) for o in obj]
elif isinstance(obj, dict):
return dict((k, conv(v)) for k, v in obj.items())
else:
return obj
return dict(zip(self._fields, (conv(o) for o in self)))
else:
return dict(zip(self._fields, self))
def get_row_kind(self) -> RowKind:
return self._row_kind
def set_row_kind(self, row_kind: RowKind):
self._row_kind = row_kind
def set_field_names(self, field_names: List):
self._fields = field_names
def get_fields_by_names(self, names: List[str]):
if not hasattr(self, '_fields') or names == self._fields:
return self._values
difference = list(set(names).difference(set(self._fields)))
if difference:
raise Exception("Field names {0} not exist in {1}.".format(difference, self._fields))
else:
return [self._values[self._fields.index(name)] for name in names]
def _is_retract_msg(self):
return self._row_kind == RowKind.UPDATE_BEFORE or self._row_kind == RowKind.DELETE
def _is_accumulate_msg(self):
return self._row_kind == RowKind.UPDATE_AFTER or self._row_kind == RowKind.INSERT
@staticmethod
def of_kind(row_kind: RowKind, *args, **kwargs):
row = Row(*args, **kwargs)
row.set_row_kind(row_kind)
return row
def __contains__(self, item):
return item in self._values
# let object acts like class
def __call__(self, *args):
"""
Creates new Row object
"""
if len(args) > len(self):
raise ValueError("Can not create Row with fields %s, expected %d values "
"but got %s" % (self, len(self), args))
return _create_row(self._values, args, self._row_kind)
def __getitem__(self, item):
if isinstance(item, (int, slice)):
return self._values[item]
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self._fields.index(item)
return self._values[idx]
except IndexError:
raise KeyError(item)
except ValueError:
raise ValueError(item)
def __setitem__(self, key, value):
if isinstance(key, (int, slice)):
self._values[key] = value
return
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self._fields.index(key)
self._values[idx] = value
except (IndexError, AttributeError):
raise KeyError(key)
except ValueError:
raise ValueError(value)
def __getattr__(self, item):
if item.startswith("_"):
raise AttributeError(item)
try:
# it will be slow when it has many fields,
# but this will not be used in normal cases
idx = self._fields.index(item)
return self[idx]
except IndexError:
raise AttributeError(item)
except ValueError:
raise AttributeError(item)
def __setattr__(self, key, value):
if key != '_fields' and key != "_from_dict" and key != "_row_kind" and key != "_values":
raise AttributeError(key)
self.__dict__[key] = value
def __reduce__(self):
"""
Returns a tuple so Python knows how to pickle Row.
"""
if hasattr(self, "_fields"):
return _create_row, (self._fields, tuple(self), self._row_kind)
else:
return _create_row, (None, tuple(self), self._row_kind)
def __repr__(self):
"""
Printable representation of Row used in Python REPL.
"""
if hasattr(self, "_fields"):
return "Row(%s)" % ", ".join("%s=%r" % (k, v)
for k, v in zip(self._fields, tuple(self)))
else:
return "<Row(%s)>" % ", ".join(repr(field) for field in self)
def __eq__(self, other):
if not isinstance(other, Row):
return False
if hasattr(self, "_fields"):
if not hasattr(other, "_fields"):
return False
if sorted(self._fields) != sorted(other._fields):
return False
sorted_fields = sorted(self._fields)
return (self.__class__ == other.__class__ and
self._row_kind == other._row_kind and
[self._values[self._fields.index(name)] for name in sorted_fields] ==
[other._values[other._fields.index(name)] for name in sorted_fields])
else:
if hasattr(other, "_fields"):
return False
return (self.__class__ == other.__class__ and
self._row_kind == other._row_kind and
self._values == other._values)
def __hash__(self):
return tuple(self).__hash__()
def __iter__(self):
return iter(self._values)
def __len__(self):
return len(self._values)
| 9,444 | 32.257042 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/common/input_dependency_constraint.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from pyflink.java_gateway import get_gateway
__all__ = ['InputDependencyConstraint']
class InputDependencyConstraint(Enum):
"""
This constraint indicates when a task should be scheduled considering its inputs status.
:data:`ANY`:
Schedule the task if any input is consumable.
:data:`ALL`:
Schedule the task if all the inputs are consumable.
"""
ANY = 0
ALL = 1
@staticmethod
def _from_j_input_dependency_constraint(j_input_dependency_constraint) \
-> 'InputDependencyConstraint':
return InputDependencyConstraint[j_input_dependency_constraint.name()]
def _to_j_input_dependency_constraint(self):
gateway = get_gateway()
JInputDependencyConstraint = gateway.jvm.org.apache.flink.api.common \
.InputDependencyConstraint
return getattr(JInputDependencyConstraint, self.name)
| 1,867 | 35.627451 | 92 |
py
|
flink
|
flink-master/flink-python/pyflink/common/time.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.java_gateway import get_gateway
__all__ = ['Duration', 'Instant', 'Time']
class Duration(object):
"""
A time-based amount of time, such as '34.5 seconds'.
"""
def __init__(self, j_duration):
self._j_duration = j_duration
@staticmethod
def of_days(days: int):
return Duration(get_gateway().jvm.java.time.Duration.ofDays(days))
@staticmethod
def of_hours(hours: int):
return Duration(get_gateway().jvm.java.time.Duration.ofHours(hours))
@staticmethod
def of_millis(millis: int):
return Duration(get_gateway().jvm.java.time.Duration.ofMillis(millis))
@staticmethod
def of_minutes(minutes: int):
return Duration(get_gateway().jvm.java.time.Duration.ofMinutes(minutes))
@staticmethod
def of_nanos(nanos: int):
return Duration(get_gateway().jvm.java.time.Duration.ofNanos(nanos))
@staticmethod
def of_seconds(seconds: int):
return Duration(get_gateway().jvm.java.time.Duration.ofSeconds(seconds))
def __eq__(self, other):
return isinstance(other, Duration) and self._j_duration.equals(other._j_duration)
class Instant(object):
"""
An instantaneous point on the time-line. Similar to Java.time.Instant.
"""
def __init__(self, seconds, nanos):
self.seconds = seconds
self.nanos = nanos
def to_epoch_milli(self):
if self.seconds < 0 < self.nanos:
return (self.seconds + 1) * 1000 + self.nanos // 1000_1000 - 1000
else:
return self.seconds * 1000 + self.nanos // 1000_000
@staticmethod
def of_epoch_milli(epoch_milli: int) -> 'Instant':
secs = epoch_milli // 1000
mos = epoch_milli % 1000
return Instant(secs, mos * 1000_000)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self.seconds == other.seconds and
self.nanos == other.nanos)
def __repr__(self):
return 'Instant<{}, {}>'.format(self.seconds, self.nanos)
class Time(object):
"""
The definition of a time interval.
"""
def __init__(self, milliseconds: int):
self._milliseconds = milliseconds
def to_milliseconds(self) -> int:
return self._milliseconds
@staticmethod
def milliseconds(milliseconds: int):
return Time(milliseconds)
@staticmethod
def seconds(seconds: int):
return Time.milliseconds(seconds * 1000)
@staticmethod
def minutes(minutes: int):
return Time.seconds(minutes * 60)
@staticmethod
def hours(hours: int):
return Time.minutes(hours * 60)
@staticmethod
def days(days: int):
return Time.hours(days * 24)
def __eq__(self, other):
return (self.__class__ == other.__class__ and
self._milliseconds == other._milliseconds)
def __str__(self):
return "{} ms".format(self._milliseconds)
| 3,915 | 30.328 | 89 |
py
|
flink
|
flink-master/flink-python/pyflink/common/watermark_strategy.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
from typing import Any, Optional
from pyflink.common.time import Duration
from pyflink.java_gateway import get_gateway
class WatermarkStrategy(object):
"""
The WatermarkStrategy defines how to generate Watermarks in the stream sources. The
WatermarkStrategy is a builder/factory for the WatermarkGenerator that generates the watermarks
and the TimestampAssigner which assigns the internal timestamp of a record.
The convenience methods, for example forBoundedOutOfOrderness(Duration), create a
WatermarkStrategy for common built in strategies.
"""
def __init__(self, j_watermark_strategy):
self._j_watermark_strategy = j_watermark_strategy
self._timestamp_assigner = None
def with_timestamp_assigner(self, timestamp_assigner: 'TimestampAssigner') -> \
'WatermarkStrategy':
"""
Creates a new WatermarkStrategy that wraps this strategy but instead uses the given a
TimestampAssigner by implementing TimestampAssigner interface.
Example:
::
>>> watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \\
>>> .with_timestamp_assigner(MyTimestampAssigner())
:param timestamp_assigner: The given TimestampAssigner.
:return: A WaterMarkStrategy that wraps a TimestampAssigner.
"""
self._timestamp_assigner = timestamp_assigner
return self
def with_idleness(self, idle_timeout: Duration) -> 'WatermarkStrategy':
"""
Creates a new enriched WatermarkStrategy that also does idleness detection in the created
WatermarkGenerator.
Example:
::
>>> WatermarkStrategy \\
... .for_bounded_out_of_orderness(Duration.of_seconds(20)) \\
... .with_idleness(Duration.of_minutes(1))
:param idle_timeout: The idle timeout.
:return: A new WatermarkStrategy with idle detection configured.
"""
return WatermarkStrategy(self._j_watermark_strategy.withIdleness(idle_timeout._j_duration))
def with_watermark_alignment(self, watermark_group: str, max_allowed_watermark_drift: Duration,
update_interval: Optional[Duration] = None) -> 'WatermarkStrategy':
"""
Creates a new :class:`WatermarkStrategy` that configures the maximum watermark drift from
other sources/tasks/partitions in the same watermark group. The group may contain completely
independent sources (e.g. File and Kafka).
Once configured Flink will "pause" consuming from a source/task/partition that is ahead
of the emitted watermark in the group by more than the maxAllowedWatermarkDrift.
Example:
::
>>> WatermarkStrategy \\
... .for_bounded_out_of_orderness(Duration.of_seconds(20)) \\
... .with_watermark_alignment("alignment-group-1", Duration.of_seconds(20),
... Duration.of_seconds(1))
:param watermark_group: A group of sources to align watermarks
:param max_allowed_watermark_drift: Maximal drift, before we pause consuming from the
source/task/partition
:param update_interval: How often tasks should notify coordinator about the current
watermark and how often the coordinator should announce the maximal aligned watermark.
If is None, default update interval (1000ms) is used.
:return: A new WatermarkStrategy with watermark alignment configured.
.. versionadded:: 1.16.0
"""
if update_interval is None:
return WatermarkStrategy(self._j_watermark_strategy.withWatermarkAlignment(
watermark_group, max_allowed_watermark_drift._j_duration
))
else:
return WatermarkStrategy(self._j_watermark_strategy.withWatermarkAlignment(
watermark_group,
max_allowed_watermark_drift._j_duration,
update_interval._j_duration,
))
@staticmethod
def for_monotonous_timestamps() -> 'WatermarkStrategy':
"""
Creates a watermark strategy for situations with monotonously ascending timestamps.
The watermarks are generated periodically and tightly follow the latest timestamp in the
data. The delay introduced by this strategy is mainly the periodic interval in which the
watermarks are generated.
"""
JWaterMarkStrategy = get_gateway().jvm\
.org.apache.flink.api.common.eventtime.WatermarkStrategy
return WatermarkStrategy(JWaterMarkStrategy.forMonotonousTimestamps())
@staticmethod
def for_bounded_out_of_orderness(max_out_of_orderness: Duration) -> 'WatermarkStrategy':
"""
Creates a watermark strategy for situations where records are out of order, but you can
place an upper bound on how far the events are out of order. An out-of-order bound B means
that once the an event with timestamp T was encountered, no events older than (T - B) will
follow any more.
"""
JWaterMarkStrategy = get_gateway().jvm \
.org.apache.flink.api.common.eventtime.WatermarkStrategy
return WatermarkStrategy(
JWaterMarkStrategy.forBoundedOutOfOrderness(max_out_of_orderness._j_duration))
@staticmethod
def no_watermarks() -> 'WatermarkStrategy':
"""
Creates a watermark strategy that generates no watermarks at all. This may be useful in
scenarios that do pure processing-time based stream processing.
.. versionadded:: 1.16.0
"""
JWaterMarkStrategy = get_gateway().jvm \
.org.apache.flink.api.common.eventtime.WatermarkStrategy
return WatermarkStrategy(JWaterMarkStrategy.noWatermarks())
class TimestampAssigner(abc.ABC):
"""
A TimestampAssigner assigns event time timestamps to elements. These timestamps are used by all
functions that operate on event time, for example event time windows.
Timestamps can be an arbitrary int value, but all built-in implementations represent it as the
milliseconds since the Epoch (midnight, January 1, 1970 UTC), the same way as time.time() does
it.
"""
@abc.abstractmethod
def extract_timestamp(self, value: Any, record_timestamp: int) -> int:
"""
Assigns a timestamp to an element, in milliseconds since the Epoch. This is independent of
any particular time zone or calendar.
The method is passed the previously assigned timestamp of the element.
That previous timestamp may have been assigned from a previous assigner. If the element did
not carry a timestamp before, this value is the minimum value of int type.
:param value: The element that the timestamp will be assigned to.
:param record_timestamp: The current internal timestamp of the element, or a negative value,
if no timestamp has been assigned yet.
:return: The new timestamp.
"""
pass
class AssignerWithPeriodicWatermarksWrapper(object):
"""
The AssignerWithPeriodicWatermarks assigns event time timestamps to elements, and generates
low watermarks that signal event time progress within the stream. These timestamps and
watermarks are used by functions and operators that operate on event time, for example event
time windows.
"""
def __init__(self, j_assigner_with_periodic_watermarks):
self._j_assigner_with_periodic_watermarks = j_assigner_with_periodic_watermarks
| 8,606 | 44.539683 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/common/execution_config.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import warnings
from typing import Dict, List
from pyflink.common.execution_mode import ExecutionMode
from pyflink.common.input_dependency_constraint import InputDependencyConstraint
from pyflink.common.restart_strategy import RestartStrategies, RestartStrategyConfiguration
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import load_java_class
__all__ = ['ExecutionConfig']
class ExecutionConfig(object):
"""
A config to define the behavior of the program execution. It allows to define (among other
options) the following settings:
- The default parallelism of the program, i.e., how many parallel tasks to use for
all functions that do not define a specific value directly.
- The number of retries in the case of failed executions.
- The delay between execution retries.
- The :class:`ExecutionMode` of the program: Batch or Pipelined.
The default execution mode is :data:`ExecutionMode.PIPELINED`
- Enabling or disabling the "closure cleaner". The closure cleaner pre-processes
the implementations of functions. In case they are (anonymous) inner classes,
it removes unused references to the enclosing class to fix certain serialization-related
problems and to reduce the size of the closure.
- The config allows to register types and serializers to increase the efficiency of
handling *generic types* and *POJOs*. This is usually only needed
when the functions return not only the types declared in their signature, but
also subclasses of those types.
:data:`PARALLELISM_DEFAULT`:
The flag value indicating use of the default parallelism. This value can
be used to reset the parallelism back to the default state.
:data:`PARALLELISM_UNKNOWN`:
The flag value indicating an unknown or unset parallelism. This value is
not a valid parallelism and indicates that the parallelism should remain
unchanged.
"""
PARALLELISM_DEFAULT = -1
PARALLELISM_UNKNOWN = -2
def __init__(self, j_execution_config):
self._j_execution_config = j_execution_config
def enable_closure_cleaner(self) -> 'ExecutionConfig':
"""
Enables the ClosureCleaner. This analyzes user code functions and sets fields to null
that are not used. This will in most cases make closures or anonymous inner classes
serializable that where not serializable due to some Scala or Java implementation artifact.
User code must be serializable because it needs to be sent to worker nodes.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.enableClosureCleaner()
return self
def disable_closure_cleaner(self) -> 'ExecutionConfig':
"""
Disables the ClosureCleaner.
.. seealso:: :func:`enable_closure_cleaner`
:return: This object.
"""
self._j_execution_config = self._j_execution_config.disableClosureCleaner()
return self
def is_closure_cleaner_enabled(self) -> bool:
"""
Returns whether the ClosureCleaner is enabled.
.. seealso:: :func:`enable_closure_cleaner`
:return: ``True`` means enable and ``False`` means disable.
"""
return self._j_execution_config.isClosureCleanerEnabled()
def set_auto_watermark_interval(self, interval: int) -> 'ExecutionConfig':
"""
Sets the interval of the automatic watermark emission. Watermarks are used throughout
the streaming system to keep track of the progress of time. They are used, for example,
for time based windowing.
:param interval: The integer value interval between watermarks in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setAutoWatermarkInterval(interval)
return self
def get_auto_watermark_interval(self) -> int:
"""
Returns the interval of the automatic watermark emission.
.. seealso:: :func:`set_auto_watermark_interval`
:return: The integer value interval in milliseconds of the automatic watermark emission.
"""
return self._j_execution_config.getAutoWatermarkInterval()
def set_latency_tracking_interval(self, interval: int) -> 'ExecutionConfig':
"""
Interval for sending latency tracking marks from the sources to the sinks.
Flink will send latency tracking marks from the sources at the specified interval.
Setting a tracking interval <= 0 disables the latency tracking.
:param interval: Integer value interval in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setLatencyTrackingInterval(interval)
return self
def get_latency_tracking_interval(self) -> int:
"""
Returns the latency tracking interval.
:return: The latency tracking interval in milliseconds.
"""
return self._j_execution_config.getLatencyTrackingInterval()
def get_parallelism(self) -> int:
"""
Gets the parallelism with which operation are executed by default. Operations can
individually override this value to use a specific parallelism.
Other operations may need to run with a different parallelism - for example calling
a reduce operation over the entire data set will involve an operation that runs
with a parallelism of one (the final reduce to the single result value).
:return: The parallelism used by operations, unless they override that value. This method
returns :data:`ExecutionConfig.PARALLELISM_DEFAULT` if the environment's default
parallelism should be used.
"""
return self._j_execution_config.getParallelism()
def set_parallelism(self, parallelism: int) -> 'ExecutionConfig':
"""
Sets the parallelism for operations executed through this environment.
Setting a parallelism of x here will cause all operators (such as join, map, reduce) to run
with x parallel instances.
This method overrides the default parallelism for this environment.
The local execution environment uses by default a value equal to the number of hardware
contexts (CPU cores / threads). When executing the program via the command line client
from a JAR/Python file, the default parallelism is the one configured for that setup.
:param parallelism: The parallelism to use.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setParallelism(parallelism)
return self
def get_max_parallelism(self) -> int:
"""
Gets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:return: Maximum degree of parallelism.
"""
return self._j_execution_config.getMaxParallelism()
def set_max_parallelism(self, max_parallelism: int) -> 'ExecutionConfig':
"""
Sets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:param max_parallelism: Maximum degree of parallelism to be used for the program.
"""
self._j_execution_config.setMaxParallelism(max_parallelism)
return self
def get_task_cancellation_interval(self) -> int:
"""
Gets the interval (in milliseconds) between consecutive attempts to cancel a running task.
:return: The integer value interval in milliseconds.
"""
return self._j_execution_config.getTaskCancellationInterval()
def set_task_cancellation_interval(self, interval: int) -> 'ExecutionConfig':
"""
Sets the configuration parameter specifying the interval (in milliseconds)
between consecutive attempts to cancel a running task.
:param interval: The integer value interval in milliseconds.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setTaskCancellationInterval(interval)
return self
def get_task_cancellation_timeout(self) -> int:
"""
Returns the timeout (in milliseconds) after which an ongoing task
cancellation leads to a fatal TaskManager error.
The value ``0`` means that the timeout is disabled. In
this case a stuck cancellation will not lead to a fatal error.
:return: The timeout in milliseconds.
"""
return self._j_execution_config.getTaskCancellationTimeout()
def set_task_cancellation_timeout(self, timeout: int) -> 'ExecutionConfig':
"""
Sets the timeout (in milliseconds) after which an ongoing task cancellation
is considered failed, leading to a fatal TaskManager error.
The cluster default is configured via ``TaskManagerOptions#TASK_CANCELLATION_TIMEOUT``.
The value ``0`` disables the timeout. In this case a stuck
cancellation will not lead to a fatal error.
:param timeout: The task cancellation timeout (in milliseconds).
:return: This object.
"""
self._j_execution_config = self._j_execution_config.setTaskCancellationTimeout(timeout)
return self
def set_restart_strategy(
self,
restart_strategy_configuration: RestartStrategyConfiguration) -> 'ExecutionConfig':
"""
Sets the restart strategy to be used for recovery.
::
>>> config = env.get_config()
>>> config.set_restart_strategy(RestartStrategies.fixed_delay_restart(10, 1000))
The restart strategy configurations are all created from :class:`RestartStrategies`.
:param restart_strategy_configuration: Configuration defining the restart strategy to use.
"""
self._j_execution_config.setRestartStrategy(
restart_strategy_configuration._j_restart_strategy_configuration)
return self
def get_restart_strategy(self) -> RestartStrategyConfiguration:
"""
Returns the restart strategy which has been set for the current job.
.. seealso:: :func:`set_restart_strategy`
:return: The specified restart configuration.
"""
return RestartStrategies._from_j_restart_strategy(
self._j_execution_config.getRestartStrategy())
def set_execution_mode(self, execution_mode: ExecutionMode) -> 'ExecutionConfig':
"""
Sets the execution mode to execute the program. The execution mode defines whether
data exchanges are performed in a batch or on a pipelined manner.
The default execution mode is :data:`ExecutionMode.PIPELINED`.
Example:
::
>>> config.set_execution_mode(ExecutionMode.BATCH)
:param execution_mode: The execution mode to use. The execution mode could be
:data:`ExecutionMode.PIPELINED`,
:data:`ExecutionMode.PIPELINED_FORCED`,
:data:`ExecutionMode.BATCH` or
:data:`ExecutionMode.BATCH_FORCED`.
"""
self._j_execution_config.setExecutionMode(execution_mode._to_j_execution_mode())
return self
def get_execution_mode(self) -> 'ExecutionMode':
"""
Gets the execution mode used to execute the program. The execution mode defines whether
data exchanges are performed in a batch or on a pipelined manner.
The default execution mode is :data:`ExecutionMode.PIPELINED`.
.. seealso:: :func:`set_execution_mode`
:return: The execution mode for the program.
"""
j_execution_mode = self._j_execution_config.getExecutionMode()
return ExecutionMode._from_j_execution_mode(j_execution_mode)
def set_default_input_dependency_constraint(
self, input_dependency_constraint: InputDependencyConstraint) -> 'ExecutionConfig':
"""
Sets the default input dependency constraint for vertex scheduling. It indicates when a
task should be scheduled considering its inputs status.
The default constraint is :data:`InputDependencyConstraint.ANY`.
Example:
::
>>> config.set_default_input_dependency_constraint(InputDependencyConstraint.ALL)
:param input_dependency_constraint: The input dependency constraint. The constraints could
be :data:`InputDependencyConstraint.ANY` or
:data:`InputDependencyConstraint.ALL`.
.. note:: Deprecated in 1.13. :class:`InputDependencyConstraint` is not used anymore in the
current scheduler implementations.
"""
warnings.warn("Deprecated in 1.13. InputDependencyConstraint is not used anywhere. "
"Therefore, the method call set_default_input_dependency_constraint is "
"obsolete.", DeprecationWarning)
self._j_execution_config.setDefaultInputDependencyConstraint(
input_dependency_constraint._to_j_input_dependency_constraint())
return self
def get_default_input_dependency_constraint(self) -> 'InputDependencyConstraint':
"""
Gets the default input dependency constraint for vertex scheduling. It indicates when a
task should be scheduled considering its inputs status.
The default constraint is :data:`InputDependencyConstraint.ANY`.
.. seealso:: :func:`set_default_input_dependency_constraint`
:return: The input dependency constraint of this job. The possible constraints are
:data:`InputDependencyConstraint.ANY` and :data:`InputDependencyConstraint.ALL`.
.. note:: Deprecated in 1.13. :class:`InputDependencyConstraint` is not used anymore in the
current scheduler implementations.
"""
warnings.warn("Deprecated in 1.13. InputDependencyConstraint is not used anywhere. "
"Therefore, the method call get_default_input_dependency_constraint is "
"obsolete.", DeprecationWarning)
j_input_dependency_constraint = self._j_execution_config\
.getDefaultInputDependencyConstraint()
return InputDependencyConstraint._from_j_input_dependency_constraint(
j_input_dependency_constraint)
def enable_force_kryo(self) -> 'ExecutionConfig':
"""
Force TypeExtractor to use Kryo serializer for POJOS even though we could analyze as POJO.
In some cases this might be preferable. For example, when using interfaces
with subclasses that cannot be analyzed as POJO.
"""
self._j_execution_config.enableForceKryo()
return self
def disable_force_kryo(self) -> 'ExecutionConfig':
"""
Disable use of Kryo serializer for all POJOs.
"""
self._j_execution_config.disableForceKryo()
return self
def is_force_kryo_enabled(self) -> bool:
"""
:return: Boolean value that represent whether the usage of Kryo serializer for all POJOs
is enabled.
"""
return self._j_execution_config.isForceKryoEnabled()
def enable_generic_types(self) -> 'ExecutionConfig':
"""
Enables the use generic types which are serialized via Kryo.
Generic types are enabled by default.
.. seealso:: :func:`disable_generic_types`
"""
self._j_execution_config.enableGenericTypes()
return self
def disable_generic_types(self) -> 'ExecutionConfig':
"""
Disables the use of generic types (types that would be serialized via Kryo). If this option
is used, Flink will throw an ``UnsupportedOperationException`` whenever it encounters
a data type that would go through Kryo for serialization.
Disabling generic types can be helpful to eagerly find and eliminate the use of types
that would go through Kryo serialization during runtime. Rather than checking types
individually, using this option will throw exceptions eagerly in the places where generic
types are used.
**Important:** We recommend to use this option only during development and pre-production
phases, not during actual production use. The application program and/or the input data may
be such that new, previously unseen, types occur at some point. In that case, setting this
option would cause the program to fail.
.. seealso:: :func:`enable_generic_types`
"""
self._j_execution_config.disableGenericTypes()
return self
def has_generic_types_disabled(self) -> bool:
"""
Checks whether generic types are supported. Generic types are types that go through Kryo
during serialization.
Generic types are enabled by default.
.. seealso:: :func:`enable_generic_types`
.. seealso:: :func:`disable_generic_types`
:return: Boolean value that represent whether the generic types are supported.
"""
return self._j_execution_config.hasGenericTypesDisabled()
def enable_auto_generated_uids(self) -> 'ExecutionConfig':
"""
Enables the Flink runtime to auto-generate UID's for operators.
.. seealso:: :func:`disable_auto_generated_uids`
"""
self._j_execution_config.enableAutoGeneratedUIDs()
return self
def disable_auto_generated_uids(self) -> 'ExecutionConfig':
"""
Disables auto-generated UIDs. Forces users to manually specify UIDs
on DataStream applications.
It is highly recommended that users specify UIDs before deploying to
production since they are used to match state in savepoints to operators
in a job. Because auto-generated ID's are likely to change when modifying
a job, specifying custom IDs allow an application to evolve overtime
without discarding state.
"""
self._j_execution_config.disableAutoGeneratedUIDs()
return self
def has_auto_generated_uids_enabled(self) -> bool:
"""
Checks whether auto generated UIDs are supported.
Auto generated UIDs are enabled by default.
.. seealso:: :func:`enable_auto_generated_uids`
.. seealso:: :func:`disable_auto_generated_uids`
:return: Boolean value that represent whether auto generated UIDs are supported.
"""
return self._j_execution_config.hasAutoGeneratedUIDsEnabled()
def enable_force_avro(self) -> 'ExecutionConfig':
"""
Forces Flink to use the Apache Avro serializer for POJOs.
**Important:** Make sure to include the *flink-avro* module.
"""
self._j_execution_config.enableForceAvro()
return self
def disable_force_avro(self) -> 'ExecutionConfig':
"""
Disables the Apache Avro serializer as the forced serializer for POJOs.
"""
self._j_execution_config.disableForceAvro()
return self
def is_force_avro_enabled(self) -> bool:
"""
Returns whether the Apache Avro is the default serializer for POJOs.
:return: Boolean value that represent whether the Apache Avro is the default serializer
for POJOs.
"""
return self._j_execution_config.isForceAvroEnabled()
def enable_object_reuse(self) -> 'ExecutionConfig':
"""
Enables reusing objects that Flink internally uses for deserialization and passing
data to user-code functions. Keep in mind that this can lead to bugs when the
user-code function of an operation is not aware of this behaviour.
:return: This object.
"""
self._j_execution_config = self._j_execution_config.enableObjectReuse()
return self
def disable_object_reuse(self) -> 'ExecutionConfig':
"""
Disables reusing objects that Flink internally uses for deserialization and passing
data to user-code functions.
.. seealso:: :func:`enable_object_reuse`
:return: This object.
"""
self._j_execution_config = self._j_execution_config.disableObjectReuse()
return self
def is_object_reuse_enabled(self) -> bool:
"""
Returns whether object reuse has been enabled or disabled.
.. seealso:: :func:`enable_object_reuse`
:return: Boolean value that represent whether object reuse has been enabled or disabled.
"""
return self._j_execution_config.isObjectReuseEnabled()
def get_global_job_parameters(self) -> Dict[str, str]:
"""
Gets current configuration dict.
:return: The configuration dict.
"""
return dict(self._j_execution_config.getGlobalJobParameters().toMap())
def set_global_job_parameters(self, global_job_parameters_dict: Dict) -> 'ExecutionConfig':
"""
Register a custom, serializable user configuration dict.
Example:
::
>>> config.set_global_job_parameters({"environment.checkpoint_interval": "1000"})
:param global_job_parameters_dict: Custom user configuration dict.
"""
gateway = get_gateway()
Configuration = gateway.jvm.org.apache.flink.configuration.Configuration
j_global_job_parameters = Configuration()
for key in global_job_parameters_dict:
if not isinstance(global_job_parameters_dict[key], str):
value = str(global_job_parameters_dict[key])
else:
value = global_job_parameters_dict[key]
j_global_job_parameters.setString(key, value)
self._j_execution_config.setGlobalJobParameters(j_global_job_parameters)
return self
def add_default_kryo_serializer(self,
type_class_name: str,
serializer_class_name: str) -> 'ExecutionConfig':
"""
Adds a new Kryo default serializer to the Runtime.
Example:
::
>>> config.add_default_kryo_serializer("com.aaa.bbb.PojoClass",
... "com.aaa.bbb.Serializer")
:param type_class_name: The full-qualified java class name of the types serialized with the
given serializer.
:param serializer_class_name: The full-qualified java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_execution_config.addDefaultKryoSerializer(type_clz, j_serializer_clz)
return self
def register_type_with_kryo_serializer(self,
type_class_name: str,
serializer_class_name: str) -> 'ExecutionConfig':
"""
Registers the given Serializer via its class as a serializer for the given type at the
KryoSerializer.
Example:
::
>>> config.register_type_with_kryo_serializer("com.aaa.bbb.PojoClass",
... "com.aaa.bbb.Serializer")
:param type_class_name: The full-qualified java class name of the types serialized with
the given serializer.
:param serializer_class_name: The full-qualified java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_execution_config.registerTypeWithKryoSerializer(type_clz, j_serializer_clz)
return self
def register_pojo_type(self, type_class_name: str) -> 'ExecutionConfig':
"""
Registers the given type with the serialization stack. If the type is eventually
serialized as a POJO, then the type is registered with the POJO serializer. If the
type ends up being serialized with Kryo, then it will be registered at Kryo to make
sure that only tags are written.
Example:
::
>>> config.register_pojo_type("com.aaa.bbb.PojoClass")
:param type_class_name: The full-qualified java class name of the type to register.
"""
type_clz = load_java_class(type_class_name)
self._j_execution_config.registerPojoType(type_clz)
return self
def register_kryo_type(self, type_class_name: str) -> 'ExecutionConfig':
"""
Registers the given type with the serialization stack. If the type is eventually
serialized as a POJO, then the type is registered with the POJO serializer. If the
type ends up being serialized with Kryo, then it will be registered at Kryo to make
sure that only tags are written.
Example:
::
>>> config.register_kryo_type("com.aaa.bbb.KryoClass")
:param type_class_name: The full-qualified java class name of the type to register.
"""
type_clz = load_java_class(type_class_name)
self._j_execution_config.registerKryoType(type_clz)
return self
def get_registered_types_with_kryo_serializer_classes(self) -> Dict[str, str]:
"""
Returns the registered types with their Kryo Serializer classes.
:return: The dict which the keys are full-qualified java class names of the registered
types and the values are full-qualified java class names of the Kryo Serializer
classes.
"""
j_clz_map = self._j_execution_config.getRegisteredTypesWithKryoSerializerClasses()
registered_serializers = {}
for key in j_clz_map:
registered_serializers[key.getName()] = j_clz_map[key].getName()
return registered_serializers
def get_default_kryo_serializer_classes(self) -> Dict[str, str]:
"""
Returns the registered default Kryo Serializer classes.
:return: The dict which the keys are full-qualified java class names of the registered
types and the values are full-qualified java class names of the Kryo default
Serializer classes.
"""
j_clz_map = self._j_execution_config.getDefaultKryoSerializerClasses()
default_kryo_serializers = {}
for key in j_clz_map:
default_kryo_serializers[key.getName()] = j_clz_map[key].getName()
return default_kryo_serializers
def get_registered_kryo_types(self) -> List[str]:
"""
Returns the registered Kryo types.
:return: The list of full-qualified java class names of the registered Kryo types.
"""
j_clz_set = self._j_execution_config.getRegisteredKryoTypes()
return [value.getName() for value in j_clz_set]
def get_registered_pojo_types(self) -> List[str]:
"""
Returns the registered POJO types.
:return: The list of full-qualified java class names of the registered POJO types.
"""
j_clz_set = self._j_execution_config.getRegisteredPojoTypes()
return [value.getName() for value in j_clz_set]
def is_auto_type_registration_disabled(self) -> bool:
"""
Returns whether Flink is automatically registering all types in the user programs with
Kryo.
:return: ``True`` means auto type registration is disabled and ``False`` means enabled.
"""
return self._j_execution_config.isAutoTypeRegistrationDisabled()
def disable_auto_type_registration(self) -> 'ExecutionConfig':
"""
Control whether Flink is automatically registering all types in the user programs with
Kryo.
"""
self._j_execution_config.disableAutoTypeRegistration()
return self
def is_use_snapshot_compression(self) -> bool:
"""
Returns whether he compression (snappy) for keyed state in full checkpoints and savepoints
is enabled.
:return: ``True`` means enabled and ``False`` means disabled.
"""
return self._j_execution_config.isUseSnapshotCompression()
def set_use_snapshot_compression(self, use_snapshot_compression: bool) -> 'ExecutionConfig':
"""
Control whether the compression (snappy) for keyed state in full checkpoints and savepoints
is enabled.
:param use_snapshot_compression: ``True`` means enabled and ``False`` means disabled.
"""
self._j_execution_config.setUseSnapshotCompression(use_snapshot_compression)
return self
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self._j_execution_config == other._j_execution_config
def __hash__(self):
return self._j_execution_config.hashCode()
| 30,297 | 40.39071 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/common/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
Common classes used by both Flink DataStream API and Table API:
- :class:`Configuration`:
Lightweight configuration object which stores key/value pairs.
- :class:`ExecutionConfig`:
A config to define the behavior of the program execution.
- :class:`ExecutionMode`:
Specifies how a batch program is executed in terms of data exchange: pipelining or batched.
- :class:`TypeInformation`:
TypeInformation is the core class of Flink's type system. FLink requires a type information
for all types that are used as input or return type of a user function.
- :class:`Types`:
Contains utilities to access the :class:`TypeInformation` of the most common types for which
Flink has provided built-in implementation.
- :class:`WatermarkStrategy`:
Defines how to generate Watermarks in the stream sources.
- :class:`Row`:
A row is a fixed-length, null-aware composite type for storing multiple values in a
deterministic field order.
- :class:`SerializationSchema`:
Base class to describes how to turn a data object into a different serialized representation.
Most data sinks (for example Apache Kafka) require the data to be handed to them in a specific
format (for example as byte strings). See
:class:`~pyflink.datastream.formats.json.JsonRowSerializationSchema`,
:class:`~pyflink.datastream.formats.json.JsonRowDeserializationSchema`,
:class:`~pyflink.datastream.formats.csv.CsvRowSerializationSchema`,
:class:`~pyflink.datastream.formats.csv.CsvRowDeserializationSchema`,
:class:`~pyflink.datastream.formats.avro.AvroRowSerializationSchema`,
:class:`~pyflink.datastream.formats.avro.AvroRowDeserializationSchema` and
:class:`~SimpleStringSchema` for more details.
"""
from pyflink.common.completable_future import CompletableFuture
from pyflink.common.config_options import ConfigOption, ConfigOptions
from pyflink.common.configuration import Configuration
from pyflink.common.execution_config import ExecutionConfig
from pyflink.common.execution_mode import ExecutionMode
from pyflink.common.input_dependency_constraint import InputDependencyConstraint
from pyflink.common.job_client import JobClient
from pyflink.common.job_execution_result import JobExecutionResult
from pyflink.common.job_id import JobID
from pyflink.common.job_status import JobStatus
from pyflink.common.restart_strategy import RestartStrategies, RestartStrategyConfiguration
from pyflink.common.serialization import SerializationSchema, DeserializationSchema, \
SimpleStringSchema, Encoder
from pyflink.common.serializer import TypeSerializer
from pyflink.common.typeinfo import Types, TypeInformation
from pyflink.common.types import Row, RowKind
from pyflink.common.time import Duration, Instant, Time
from pyflink.common.watermark_strategy import WatermarkStrategy, \
AssignerWithPeriodicWatermarksWrapper
__all__ = [
'Configuration',
'ConfigOption',
'ConfigOptions',
'ExecutionConfig',
"TypeInformation",
"TypeSerializer",
"Types",
'SerializationSchema',
'DeserializationSchema',
'SimpleStringSchema',
'Encoder',
'CompletableFuture',
'ExecutionMode',
'InputDependencyConstraint',
'JobClient',
'JobExecutionResult',
'JobID',
'JobStatus',
'RestartStrategies',
'RestartStrategyConfiguration',
"Row",
"RowKind",
"WatermarkStrategy",
"Duration",
"Instant",
"Time",
"AssignerWithPeriodicWatermarksWrapper"
]
def _install():
from pyflink import common
# json
from pyflink.datastream.formats.json import JsonRowDeserializationSchema
from pyflink.datastream.formats.json import JsonRowSerializationSchema
setattr(common, 'JsonRowDeserializationSchema', JsonRowDeserializationSchema)
setattr(common, 'JsonRowSerializationSchema', JsonRowSerializationSchema)
# csv
from pyflink.datastream.formats.csv import CsvRowDeserializationSchema
from pyflink.datastream.formats.csv import CsvRowSerializationSchema
setattr(common, 'CsvRowDeserializationSchema', CsvRowDeserializationSchema)
setattr(common, 'CsvRowSerializationSchema', CsvRowSerializationSchema)
# avro
from pyflink.datastream.formats.avro import AvroRowDeserializationSchema
from pyflink.datastream.formats.avro import AvroRowSerializationSchema
setattr(common, 'AvroRowDeserializationSchema', AvroRowDeserializationSchema)
setattr(common, 'AvroRowSerializationSchema', AvroRowSerializationSchema)
# for backward compatibility
_install()
del _install
| 5,570 | 42.866142 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/common/typeinfo.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import calendar
import datetime
import time
from enum import Enum
from typing import List, Union
from py4j.java_gateway import JavaClass, JavaObject
from pyflink.java_gateway import get_gateway
__all__ = ['TypeInformation', 'Types']
class TypeInformation(object):
"""
TypeInformation is the core class of Flink's type system. FLink requires a type information
for all types that are used as input or return type of a user function. This type information
class acts as the tool to generate serializers and comparators, and to perform semantic checks
such as whether the fields that are used as join/grouping keys actually exist.
The type information also bridges between the programming languages object model and a logical
flat schema. It maps fields from the types to columns (fields) in a flat schema. Not all fields
from a type are mapped to a separate fields in the flat schema and often, entire types are
mapped to one field.. It is important to notice that the schema must hold for all instances of a
type. For that reason, elements in lists and arrays are not assigned to individual fields, but
the lists and arrays are considered to be one field in total, to account for different lengths
in the arrays.
a) Basic types are indivisible and are considered as a single field.
b) Arrays and collections are one field.
c) Tuples represents as many fields as the class has fields.
To represent this properly, each type has an arity (the number of fields it contains directly),
and a total number of fields (number of fields in the entire schema of this type, including
nested types).
"""
def __init__(self):
self._j_typeinfo = None
def get_java_type_info(self) -> JavaObject:
pass
def need_conversion(self):
"""
Does this type need to conversion between Python object and internal Wrapper object.
"""
return False
def to_internal_type(self, obj):
"""
Converts a Python object into an internal object.
"""
return obj
def from_internal_type(self, obj):
"""
Converts an internal object into a native Python object.
"""
return obj
class BasicType(Enum):
STRING = "String"
BYTE = "Byte"
BOOLEAN = "Boolean"
SHORT = "Short"
INT = "Integer"
LONG = "Long"
FLOAT = "Float"
DOUBLE = "Double"
CHAR = "Char"
BIG_INT = "BigInteger"
BIG_DEC = "BigDecimal"
INSTANT = "Instant"
class BasicTypeInfo(TypeInformation):
"""
Type information for primitive types (int, long, double, byte, ...), String, BigInteger,
and BigDecimal.
"""
def __init__(self, basic_type: BasicType):
self._basic_type = basic_type
super(BasicTypeInfo, self).__init__()
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
JBasicTypeInfo = get_gateway().jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo
if self._basic_type == BasicType.STRING:
self._j_typeinfo = JBasicTypeInfo.STRING_TYPE_INFO
elif self._basic_type == BasicType.BYTE:
self._j_typeinfo = JBasicTypeInfo.BYTE_TYPE_INFO
elif self._basic_type == BasicType.BOOLEAN:
self._j_typeinfo = JBasicTypeInfo.BOOLEAN_TYPE_INFO
elif self._basic_type == BasicType.SHORT:
self._j_typeinfo = JBasicTypeInfo.SHORT_TYPE_INFO
elif self._basic_type == BasicType.INT:
self._j_typeinfo = JBasicTypeInfo.INT_TYPE_INFO
elif self._basic_type == BasicType.LONG:
self._j_typeinfo = JBasicTypeInfo.LONG_TYPE_INFO
elif self._basic_type == BasicType.FLOAT:
self._j_typeinfo = JBasicTypeInfo.FLOAT_TYPE_INFO
elif self._basic_type == BasicType.DOUBLE:
self._j_typeinfo = JBasicTypeInfo.DOUBLE_TYPE_INFO
elif self._basic_type == BasicType.CHAR:
self._j_typeinfo = JBasicTypeInfo.CHAR_TYPE_INFO
elif self._basic_type == BasicType.BIG_INT:
self._j_typeinfo = JBasicTypeInfo.BIG_INT_TYPE_INFO
elif self._basic_type == BasicType.BIG_DEC:
self._j_typeinfo = JBasicTypeInfo.BIG_DEC_TYPE_INFO
elif self._basic_type == BasicType.INSTANT:
self._j_typeinfo = JBasicTypeInfo.INSTANT_TYPE_INFO
else:
raise TypeError("Invalid BasicType %s." % self._basic_type)
return self._j_typeinfo
def __eq__(self, o) -> bool:
if isinstance(o, BasicTypeInfo):
return self._basic_type == o._basic_type
return False
def __repr__(self):
return self._basic_type.value
@staticmethod
def STRING_TYPE_INFO():
return BasicTypeInfo(BasicType.STRING)
@staticmethod
def BOOLEAN_TYPE_INFO():
return BasicTypeInfo(BasicType.BOOLEAN)
@staticmethod
def BYTE_TYPE_INFO():
return BasicTypeInfo(BasicType.BYTE)
@staticmethod
def SHORT_TYPE_INFO():
return BasicTypeInfo(BasicType.SHORT)
@staticmethod
def INT_TYPE_INFO():
return BasicTypeInfo(BasicType.INT)
@staticmethod
def LONG_TYPE_INFO():
return BasicTypeInfo(BasicType.LONG)
@staticmethod
def FLOAT_TYPE_INFO():
return BasicTypeInfo(BasicType.FLOAT)
@staticmethod
def DOUBLE_TYPE_INFO():
return BasicTypeInfo(BasicType.DOUBLE)
@staticmethod
def CHAR_TYPE_INFO():
return BasicTypeInfo(BasicType.CHAR)
@staticmethod
def BIG_INT_TYPE_INFO():
return BasicTypeInfo(BasicType.BIG_INT)
@staticmethod
def BIG_DEC_TYPE_INFO():
return BasicTypeInfo(BasicType.BIG_DEC)
@staticmethod
def INSTANT_TYPE_INFO():
return InstantTypeInfo(BasicType.INSTANT)
class InstantTypeInfo(BasicTypeInfo):
"""
InstantTypeInfo enables users to get Instant TypeInfo.
"""
def __init__(self, basic_type: BasicType):
super(InstantTypeInfo, self).__init__(basic_type)
def need_conversion(self):
return True
def to_internal_type(self, obj):
return obj.to_epoch_milli() * 1000
def from_internal_type(self, obj):
from pyflink.common.time import Instant
return Instant.of_epoch_milli(obj // 1000)
class SqlTimeTypeInfo(TypeInformation):
"""
SqlTimeTypeInfo enables users to get Sql Time TypeInfo.
"""
@staticmethod
def DATE():
return DateTypeInfo()
@staticmethod
def TIME():
return TimeTypeInfo()
@staticmethod
def TIMESTAMP():
return TimestampTypeInfo()
class PrimitiveArrayTypeInfo(TypeInformation):
"""
A TypeInformation for arrays of primitive types (int, long, double, ...).
Supports the creation of dedicated efficient serializers for these types.
"""
def __init__(self, element_type: TypeInformation):
self._element_type = element_type
super(PrimitiveArrayTypeInfo, self).__init__()
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
JPrimitiveArrayTypeInfo = get_gateway().jvm.org.apache.flink.api.common.typeinfo \
.PrimitiveArrayTypeInfo
if self._element_type == Types.BOOLEAN():
self._j_typeinfo = JPrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO
elif self._element_type == Types.BYTE():
self._j_typeinfo = JPrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO
elif self._element_type == Types.SHORT():
self._j_typeinfo = JPrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO
elif self._element_type == Types.INT():
self._j_typeinfo = JPrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO
elif self._element_type == Types.LONG():
self._j_typeinfo = JPrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO
elif self._element_type == Types.FLOAT():
self._j_typeinfo = JPrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO
elif self._element_type == Types.DOUBLE():
self._j_typeinfo = JPrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO
elif self._element_type == Types.CHAR():
self._j_typeinfo = JPrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO
else:
raise TypeError("Invalid element type for a primitive array.")
return self._j_typeinfo
def __eq__(self, o) -> bool:
if isinstance(o, PrimitiveArrayTypeInfo):
return self._element_type == o._element_type
return False
def __repr__(self) -> str:
return "PrimitiveArrayTypeInfo<%s>" % self._element_type
class BasicArrayTypeInfo(TypeInformation):
"""
A TypeInformation for arrays of boxed primitive types (Integer, Long, Double, ...).
Supports the creation of dedicated efficient serializers for these types.
"""
def __init__(self, element_type: TypeInformation):
self._element_type = element_type
super(BasicArrayTypeInfo, self).__init__()
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
JBasicArrayTypeInfo = get_gateway().jvm.org.apache.flink.api.common.typeinfo \
.BasicArrayTypeInfo
if self._element_type == Types.BOOLEAN():
self._j_typeinfo = JBasicArrayTypeInfo.BOOLEAN_ARRAY_TYPE_INFO
elif self._element_type == Types.BYTE():
self._j_typeinfo = JBasicArrayTypeInfo.BYTE_ARRAY_TYPE_INFO
elif self._element_type == Types.SHORT():
self._j_typeinfo = JBasicArrayTypeInfo.SHORT_ARRAY_TYPE_INFO
elif self._element_type == Types.INT():
self._j_typeinfo = JBasicArrayTypeInfo.INT_ARRAY_TYPE_INFO
elif self._element_type == Types.LONG():
self._j_typeinfo = JBasicArrayTypeInfo.LONG_ARRAY_TYPE_INFO
elif self._element_type == Types.FLOAT():
self._j_typeinfo = JBasicArrayTypeInfo.FLOAT_ARRAY_TYPE_INFO
elif self._element_type == Types.DOUBLE():
self._j_typeinfo = JBasicArrayTypeInfo.DOUBLE_ARRAY_TYPE_INFO
elif self._element_type == Types.CHAR():
self._j_typeinfo = JBasicArrayTypeInfo.CHAR_ARRAY_TYPE_INFO
elif self._element_type == Types.STRING():
self._j_typeinfo = JBasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO
else:
raise TypeError("Invalid element type for a primitive array.")
return self._j_typeinfo
def __eq__(self, o) -> bool:
if isinstance(o, BasicArrayTypeInfo):
return self._element_type == o._element_type
return False
def __repr__(self):
return "BasicArrayTypeInfo<%s>" % self._element_type
class ObjectArrayTypeInfo(TypeInformation):
"""
A TypeInformation for arrays of non-primitive types.
"""
def __init__(self, element_type: TypeInformation):
self._element_type = element_type
super(ObjectArrayTypeInfo, self).__init__()
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
JTypes = get_gateway().jvm.org.apache.flink.api.common.typeinfo.Types
self._j_typeinfo = JTypes.OBJECT_ARRAY(self._element_type.get_java_type_info())
return self._j_typeinfo
def __eq__(self, o) -> bool:
if isinstance(o, ObjectArrayTypeInfo):
return self._element_type == o._element_type
return False
def __repr__(self):
return "ObjectArrayTypeInfo<%s>" % self._element_type
class PickledBytesTypeInfo(TypeInformation):
"""
A PickledBytesTypeInfo indicates the data is a primitive byte array generated by pickle
serializer.
"""
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
self._j_typeinfo = get_gateway().jvm.org.apache.flink.streaming.api.typeinfo.python\
.PickledByteArrayTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO
return self._j_typeinfo
def __eq__(self, o: object) -> bool:
return isinstance(o, PickledBytesTypeInfo)
def __repr__(self):
return "PickledByteArrayTypeInfo"
class RowTypeInfo(TypeInformation):
"""
TypeInformation for Row.
"""
def __init__(self, field_types: List[TypeInformation], field_names: List[str] = None):
self._field_types = field_types
self._field_names = field_names
self._need_conversion = [f.need_conversion() if isinstance(f, TypeInformation) else None
for f in self._field_types]
self._need_serialize_any_field = any(self._need_conversion)
super(RowTypeInfo, self).__init__()
def get_field_names(self) -> List[str]:
if not self._field_names:
j_field_names = self.get_java_type_info().getFieldNames()
self._field_names = [name for name in j_field_names]
return self._field_names
def get_field_index(self, field_name: str) -> int:
if self._field_names:
return self._field_names.index(field_name)
return -1
def get_field_types(self) -> List[TypeInformation]:
return self._field_types
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
j_types_array = get_gateway()\
.new_array(get_gateway().jvm.org.apache.flink.api.common.typeinfo.TypeInformation,
len(self._field_types))
for i in range(len(self._field_types)):
field_type = self._field_types[i]
if isinstance(field_type, TypeInformation):
j_types_array[i] = field_type.get_java_type_info()
if self._field_names is None:
self._j_typeinfo = get_gateway().jvm\
.org.apache.flink.api.java.typeutils.RowTypeInfo(j_types_array)
else:
j_names_array = get_gateway().new_array(get_gateway().jvm.java.lang.String,
len(self._field_names))
for i in range(len(self._field_names)):
j_names_array[i] = self._field_names[i]
self._j_typeinfo = get_gateway().jvm\
.org.apache.flink.api.java.typeutils.RowTypeInfo(j_types_array, j_names_array)
return self._j_typeinfo
def __eq__(self, other) -> bool:
if isinstance(other, RowTypeInfo):
return self._field_types == other._field_types
return False
def __repr__(self) -> str:
if self._field_names:
return "RowTypeInfo(%s)" % ', '.join([field_name + ': ' + str(field_type)
for field_name, field_type in
zip(self.get_field_names(),
self.get_field_types())])
else:
return "RowTypeInfo(%s)" % ', '.join(
[str(field_type) for field_type in self._field_types])
def need_conversion(self):
return True
def to_internal_type(self, obj):
if obj is None:
return
from pyflink.common import Row, RowKind
if self._need_serialize_any_field:
# Only calling to_internal_type function for fields that need conversion
if isinstance(obj, dict):
return (RowKind.INSERT.value,) + tuple(
f.to_internal_type(obj.get(n)) if c else obj.get(n)
for n, f, c in
zip(self.get_field_names(), self._field_types, self._need_conversion))
elif isinstance(obj, Row) and hasattr(obj, "_fields"):
return (obj.get_row_kind().value,) + tuple(
f.to_internal_type(obj[n]) if c else obj[n]
for n, f, c in
zip(self.get_field_names(), self._field_types, self._need_conversion))
elif isinstance(obj, Row):
return (obj.get_row_kind().value,) + tuple(
f.to_internal_type(v) if c else v
for f, v, c in zip(self._field_types, obj, self._need_conversion))
elif isinstance(obj, (tuple, list)):
return (RowKind.INSERT.value,) + tuple(
f.to_internal_type(v) if c else v
for f, v, c in zip(self._field_types, obj, self._need_conversion))
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return (RowKind.INSERT.value,) + tuple(
f.to_internal_type(d.get(n)) if c else d.get(n)
for n, f, c in
zip(self.get_field_names(), self._field_types, self._need_conversion))
else:
raise ValueError("Unexpected tuple %r with RowTypeInfo" % obj)
else:
if isinstance(obj, dict):
return (RowKind.INSERT.value,) + tuple(obj.get(n) for n in self.get_field_names())
elif isinstance(obj, Row) and hasattr(obj, "_fields"):
return (obj.get_row_kind().value,) + tuple(
obj[n] for n in self.get_field_names())
elif isinstance(obj, Row):
return (obj.get_row_kind().value,) + tuple(obj)
elif isinstance(obj, (list, tuple)):
return (RowKind.INSERT.value,) + tuple(obj)
elif hasattr(obj, "__dict__"):
d = obj.__dict__
return (RowKind.INSERT.value,) + tuple(d.get(n) for n in self.get_field_names())
else:
raise ValueError("Unexpected tuple %r with RowTypeInfo" % obj)
def from_internal_type(self, obj):
if obj is None:
return
if isinstance(obj, (tuple, list)):
# it's already converted by pickler
return obj
if self._need_serialize_any_field:
# Only calling from_internal_type function for fields that need conversion
values = [f.from_internal_type(v) if c else v
for f, v, c in zip(self._field_types, obj, self._need_conversion)]
else:
values = obj
return tuple(values)
class TupleTypeInfo(TypeInformation):
"""
TypeInformation for Tuple.
"""
def __init__(self, field_types: List[TypeInformation]):
self._field_types = field_types
self._need_conversion = [f.need_conversion() if isinstance(f, TypeInformation) else None
for f in self._field_types]
self._need_serialize_any_field = any(self._need_conversion)
super(TupleTypeInfo, self).__init__()
def get_field_types(self) -> List[TypeInformation]:
return self._field_types
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
j_types_array = get_gateway().new_array(
get_gateway().jvm.org.apache.flink.api.common.typeinfo.TypeInformation,
len(self._field_types))
for i in range(len(self._field_types)):
field_type = self._field_types[i]
if isinstance(field_type, TypeInformation):
j_types_array[i] = field_type.get_java_type_info()
self._j_typeinfo = get_gateway().jvm \
.org.apache.flink.api.java.typeutils.TupleTypeInfo(j_types_array)
return self._j_typeinfo
def need_conversion(self):
return True
def to_internal_type(self, obj):
if obj is None:
return
from pyflink.common import Row
if self._need_serialize_any_field:
# Only calling to_internal_type function for fields that need conversion
if isinstance(obj, (list, tuple, Row)):
return tuple(
f.to_internal_type(v) if c else v
for f, v, c in zip(self._field_types, obj, self._need_conversion))
else:
raise ValueError("Unexpected tuple %r with TupleTypeInfo" % obj)
else:
if isinstance(obj, (list, tuple, Row)):
return tuple(obj)
else:
raise ValueError("Unexpected tuple %r with TupleTypeInfo" % obj)
def from_internal_type(self, obj):
if obj is None or isinstance(obj, (tuple, list)):
# it's already converted by pickler
return obj
if self._need_serialize_any_field:
# Only calling from_internal_type function for fields that need conversion
values = [f.from_internal_type(v) if c else v
for f, v, c in zip(self._field_types, obj, self._need_conversion)]
else:
values = obj
return tuple(values)
def __eq__(self, other) -> bool:
if isinstance(other, TupleTypeInfo):
return self._field_types == other._field_types
return False
def __repr__(self) -> str:
return "TupleTypeInfo(%s)" % ', '.join(
[str(field_type) for field_type in self._field_types])
class DateTypeInfo(TypeInformation):
"""
TypeInformation for Date.
"""
def __init__(self):
super(DateTypeInfo, self).__init__()
EPOCH_ORDINAL = datetime.datetime(1970, 1, 1).toordinal()
def need_conversion(self):
return True
def to_internal_type(self, d):
if d is not None:
return d.toordinal() - self.EPOCH_ORDINAL
def from_internal_type(self, v):
if v is not None:
return datetime.date.fromordinal(v + self.EPOCH_ORDINAL)
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
self._j_typeinfo = get_gateway().jvm\
.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo.DATE
return self._j_typeinfo
def __eq__(self, o: object) -> bool:
return isinstance(o, DateTypeInfo)
def __repr__(self):
return "DateTypeInfo"
class TimeTypeInfo(TypeInformation):
"""
TypeInformation for Time.
"""
EPOCH_ORDINAL = calendar.timegm(time.localtime(0)) * 10 ** 6
def need_conversion(self):
return True
def to_internal_type(self, t):
if t is not None:
if t.tzinfo is not None:
offset = t.utcoffset()
offset = offset if offset else datetime.timedelta()
offset_microseconds =\
(offset.days * 86400 + offset.seconds) * 10 ** 6 + offset.microseconds
else:
offset_microseconds = self.EPOCH_ORDINAL
minutes = t.hour * 60 + t.minute
seconds = minutes * 60 + t.second
return seconds * 10 ** 6 + t.microsecond - offset_microseconds
def from_internal_type(self, t):
if t is not None:
seconds, microseconds = divmod(t + self.EPOCH_ORDINAL, 10 ** 6)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, microseconds)
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
self._j_typeinfo = get_gateway().jvm\
.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo.TIME
return self._j_typeinfo
def __eq__(self, o: object) -> bool:
return isinstance(o, TimeTypeInfo)
def __repr__(self) -> str:
return "TimeTypeInfo"
class TimestampTypeInfo(TypeInformation):
"""
TypeInformation for Timestamp.
"""
def need_conversion(self):
return True
def to_internal_type(self, dt):
if dt is not None:
seconds = (calendar.timegm(dt.utctimetuple()) if dt.tzinfo
else time.mktime(dt.timetuple()))
return int(seconds) * 10 ** 6 + dt.microsecond
def from_internal_type(self, ts):
if ts is not None:
return datetime.datetime.fromtimestamp(ts // 10 ** 6).replace(microsecond=ts % 10 ** 6)
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
self._j_typeinfo = get_gateway().jvm\
.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo.TIMESTAMP
return self._j_typeinfo
def __eq__(self, o: object) -> bool:
return isinstance(o, TimestampTypeInfo)
def __repr__(self):
return "TimestampTypeInfo"
class ListTypeInfo(TypeInformation):
"""
A TypeInformation for the list types.
"""
def __init__(self, element_type: TypeInformation):
self.elem_type = element_type
super(ListTypeInfo, self).__init__()
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
self._j_typeinfo = get_gateway().jvm\
.org.apache.flink.api.common.typeinfo.Types.LIST(
self.elem_type.get_java_type_info())
return self._j_typeinfo
def __eq__(self, other):
if isinstance(other, ListTypeInfo):
return self.elem_type == other.elem_type
else:
return False
def __repr__(self):
return "ListTypeInfo<%s>" % self.elem_type
class MapTypeInfo(TypeInformation):
def __init__(self, key_type_info: TypeInformation, value_type_info: TypeInformation):
self._key_type_info = key_type_info
self._value_type_info = value_type_info
super(MapTypeInfo, self).__init__()
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
self._j_typeinfo = get_gateway().jvm\
.org.apache.flink.api.common.typeinfo.Types.MAP(
self._key_type_info.get_java_type_info(),
self._value_type_info.get_java_type_info())
return self._j_typeinfo
def __eq__(self, other):
if isinstance(other, MapTypeInfo):
return self._key_type_info == other._key_type_info and \
self._value_type_info == other._value_type_info
def __repr__(self) -> str:
return 'MapTypeInfo<{}, {}>'.format(self._key_type_info, self._value_type_info)
class LocalTimeTypeInfo(TypeInformation):
class TimeType(Enum):
LOCAL_DATE = 0
LOCAL_TIME = 1
LOCAL_DATE_TIME = 2
def __init__(self, time_type: TimeType):
super(LocalTimeTypeInfo, self).__init__()
self._time_type = time_type
def get_java_type_info(self) -> JavaObject:
if self._j_typeinfo is None:
jvm = get_gateway().jvm
if self._time_type == LocalTimeTypeInfo.TimeType.LOCAL_DATE:
self._j_typeinfo = \
jvm.org.apache.flink.api.common.typeinfo.LocalTimeTypeInfo.LOCAL_DATE
elif self._time_type == LocalTimeTypeInfo.TimeType.LOCAL_TIME:
self._j_typeinfo = \
jvm.org.apache.flink.api.common.typeinfo.LocalTimeTypeInfo.LOCAL_TIME
elif self._time_type == LocalTimeTypeInfo.TimeType.LOCAL_DATE_TIME:
self._j_typeinfo = \
jvm.org.apache.flink.api.common.typeinfo.LocalTimeTypeInfo.LOCAL_DATE_TIME
else:
raise TypeError('Unsupported TimeType: {}'.format(self._time_type.name))
return self._j_typeinfo
def __eq__(self, other):
return self.__class__ == other.__class__ and self._time_type == other._time_type
def __repr__(self):
return 'LocalTimeTypeInfo<{}>'.format(self._time_type.name)
class ExternalTypeInfo(TypeInformation):
def __init__(self, type_info: TypeInformation):
super(ExternalTypeInfo, self).__init__()
self._type_info = type_info
def get_java_type_info(self) -> JavaObject:
if not self._j_typeinfo:
gateway = get_gateway()
TypeInfoDataTypeConverter = \
gateway.jvm.org.apache.flink.table.types.utils.LegacyTypeInfoDataTypeConverter
JExternalTypeInfo = \
gateway.jvm.org.apache.flink.table.runtime.typeutils.ExternalTypeInfo
j_data_type = TypeInfoDataTypeConverter.toDataType(self._type_info.get_java_type_info())
self._j_typeinfo = JExternalTypeInfo.of(j_data_type)
return self._j_typeinfo
def __eq__(self, other):
return self.__class__ == other.__class__ and self._type_info == other._type_info
def __repr__(self):
return 'ExternalTypeInfo<{}>'.format(self._type_info)
class Types(object):
"""
This class gives access to the type information of the most common types for which Flink has
built-in serializers and comparators.
"""
@staticmethod
def STRING() -> TypeInformation:
"""
Returns type information for string. Supports a None value.
"""
return BasicTypeInfo.STRING_TYPE_INFO()
@staticmethod
def BYTE() -> TypeInformation:
"""
Returns type information for byte. Does not support a None value.
"""
return BasicTypeInfo.BYTE_TYPE_INFO()
@staticmethod
def BOOLEAN() -> TypeInformation:
"""
Returns type information for bool. Does not support a None value.
"""
return BasicTypeInfo.BOOLEAN_TYPE_INFO()
@staticmethod
def SHORT() -> TypeInformation:
"""
Returns type information for short. Does not support a None value.
"""
return BasicTypeInfo.SHORT_TYPE_INFO()
@staticmethod
def INT() -> TypeInformation:
"""
Returns type information for int. Does not support a None value.
"""
return BasicTypeInfo.INT_TYPE_INFO()
@staticmethod
def LONG() -> TypeInformation:
"""
Returns type information for long. Does not support a None value.
"""
return BasicTypeInfo.LONG_TYPE_INFO()
@staticmethod
def FLOAT() -> TypeInformation:
"""
Returns type information for float. Does not support a None value.
"""
return BasicTypeInfo.FLOAT_TYPE_INFO()
@staticmethod
def DOUBLE() -> TypeInformation:
"""
Returns type information for double. Does not support a None value.
"""
return BasicTypeInfo.DOUBLE_TYPE_INFO()
@staticmethod
def CHAR() -> TypeInformation:
"""
Returns type information for char. Does not support a None value.
"""
return BasicTypeInfo.CHAR_TYPE_INFO()
@staticmethod
def BIG_INT() -> TypeInformation:
"""
Returns type information for BigInteger. Supports a None value.
"""
return BasicTypeInfo.BIG_INT_TYPE_INFO()
@staticmethod
def BIG_DEC() -> TypeInformation:
"""
Returns type information for BigDecimal. Supports a None value.
"""
return BasicTypeInfo.BIG_DEC_TYPE_INFO()
@staticmethod
def INSTANT() -> TypeInformation:
"""
Returns type information for Instant. Supports a None value.
"""
return BasicTypeInfo.INSTANT_TYPE_INFO()
@staticmethod
def SQL_DATE() -> TypeInformation:
"""
Returns type information for Date. Supports a None value.
"""
return SqlTimeTypeInfo.DATE()
@staticmethod
def SQL_TIME() -> TypeInformation:
"""
Returns type information for Time. Supports a None value.
"""
return SqlTimeTypeInfo.TIME()
@staticmethod
def SQL_TIMESTAMP() -> TypeInformation:
"""
Returns type information for Timestamp. Supports a None value.
"""
return SqlTimeTypeInfo.TIMESTAMP()
@staticmethod
def PICKLED_BYTE_ARRAY() -> TypeInformation:
"""
Returns type information which uses pickle for serialization/deserialization.
"""
return PickledBytesTypeInfo()
@staticmethod
def ROW(field_types: List[TypeInformation]):
"""
Returns type information for Row with fields of the given types. A row itself must not be
null.
:param field_types: the types of the row fields, e.g., Types.String(), Types.INT()
"""
return RowTypeInfo(field_types)
@staticmethod
def ROW_NAMED(field_names: List[str], field_types: List[TypeInformation]):
"""
Returns type information for Row with fields of the given types and with given names. A row
must not be null.
:param field_names: array of field names.
:param field_types: array of field types.
"""
return RowTypeInfo(field_types, field_names)
@staticmethod
def TUPLE(field_types: List[TypeInformation]):
"""
Returns type information for Tuple with fields of the given types. A Tuple itself must not
be null.
:param field_types: array of field types.
"""
return TupleTypeInfo(field_types)
@staticmethod
def PRIMITIVE_ARRAY(element_type: TypeInformation):
"""
Returns type information for arrays of primitive type (such as byte[]). The array must not
be null.
:param element_type: element type of the array (e.g. Types.BOOLEAN(), Types.INT(),
Types.DOUBLE())
"""
return PrimitiveArrayTypeInfo(element_type)
@staticmethod
def BASIC_ARRAY(element_type: TypeInformation) -> TypeInformation:
"""
Returns type information for arrays of boxed primitive type (such as Integer[]).
:param element_type: element type of the array (e.g. Types.BOOLEAN(), Types.INT(),
Types.DOUBLE())
"""
return BasicArrayTypeInfo(element_type)
@staticmethod
def OBJECT_ARRAY(element_type: TypeInformation) -> TypeInformation:
"""
Returns type information for arrays of non-primitive types. The array itself must not be
None. None values for elements are supported.
:param element_type: element type of the array
"""
return ObjectArrayTypeInfo(element_type)
@staticmethod
def MAP(key_type_info: TypeInformation, value_type_info: TypeInformation) -> TypeInformation:
"""
Special TypeInformation used by MapStateDescriptor
:param key_type_info: Element type of key (e.g. Types.BOOLEAN(), Types.INT(),
Types.DOUBLE())
:param value_type_info: Element type of value (e.g. Types.BOOLEAN(), Types.INT(),
Types.DOUBLE())
"""
return MapTypeInfo(key_type_info, value_type_info)
@staticmethod
def LIST(element_type_info: TypeInformation) -> TypeInformation:
"""
A TypeInformation for the list type.
:param element_type_info: The type of the elements in the list
"""
return ListTypeInfo(element_type_info)
def _from_java_type(j_type_info: JavaObject) -> TypeInformation:
gateway = get_gateway()
JBasicTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.BasicTypeInfo
if _is_instance_of(j_type_info, JBasicTypeInfo.STRING_TYPE_INFO):
return Types.STRING()
elif _is_instance_of(j_type_info, JBasicTypeInfo.BOOLEAN_TYPE_INFO):
return Types.BOOLEAN()
elif _is_instance_of(j_type_info, JBasicTypeInfo.BYTE_TYPE_INFO):
return Types.BYTE()
elif _is_instance_of(j_type_info, JBasicTypeInfo.SHORT_TYPE_INFO):
return Types.SHORT()
elif _is_instance_of(j_type_info, JBasicTypeInfo.INT_TYPE_INFO):
return Types.INT()
elif _is_instance_of(j_type_info, JBasicTypeInfo.LONG_TYPE_INFO):
return Types.LONG()
elif _is_instance_of(j_type_info, JBasicTypeInfo.FLOAT_TYPE_INFO):
return Types.FLOAT()
elif _is_instance_of(j_type_info, JBasicTypeInfo.DOUBLE_TYPE_INFO):
return Types.DOUBLE()
elif _is_instance_of(j_type_info, JBasicTypeInfo.CHAR_TYPE_INFO):
return Types.CHAR()
elif _is_instance_of(j_type_info, JBasicTypeInfo.BIG_INT_TYPE_INFO):
return Types.BIG_INT()
elif _is_instance_of(j_type_info, JBasicTypeInfo.BIG_DEC_TYPE_INFO):
return Types.BIG_DEC()
elif _is_instance_of(j_type_info, JBasicTypeInfo.INSTANT_TYPE_INFO):
return Types.INSTANT()
JSqlTimeTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.SqlTimeTypeInfo
if _is_instance_of(j_type_info, JSqlTimeTypeInfo.DATE):
return Types.SQL_DATE()
elif _is_instance_of(j_type_info, JSqlTimeTypeInfo.TIME):
return Types.SQL_TIME()
elif _is_instance_of(j_type_info, JSqlTimeTypeInfo.TIMESTAMP):
return Types.SQL_TIMESTAMP()
JPrimitiveArrayTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo \
.PrimitiveArrayTypeInfo
if _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.BOOLEAN())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.BYTE())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.SHORT())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.INT())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.LONG())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.FLOAT())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.DOUBLE())
elif _is_instance_of(j_type_info, JPrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO):
return Types.PRIMITIVE_ARRAY(Types.CHAR())
JBasicArrayTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.BasicArrayTypeInfo
if _is_instance_of(j_type_info, JBasicArrayTypeInfo.BOOLEAN_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.BOOLEAN())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.BYTE_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.BYTE())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.SHORT_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.SHORT())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.INT_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.INT())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.LONG_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.LONG())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.FLOAT_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.FLOAT())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.DOUBLE_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.DOUBLE())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.CHAR_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.CHAR())
elif _is_instance_of(j_type_info, JBasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO):
return Types.BASIC_ARRAY(Types.STRING())
JObjectArrayTypeInfo = gateway.jvm.org.apache.flink.api.java.typeutils.ObjectArrayTypeInfo
if _is_instance_of(j_type_info, JObjectArrayTypeInfo):
return Types.OBJECT_ARRAY(_from_java_type(j_type_info.getComponentInfo()))
JPickledBytesTypeInfo = gateway.jvm \
.org.apache.flink.streaming.api.typeinfo.python.PickledByteArrayTypeInfo\
.PICKLED_BYTE_ARRAY_TYPE_INFO
if _is_instance_of(j_type_info, JPickledBytesTypeInfo):
return Types.PICKLED_BYTE_ARRAY()
JRowTypeInfo = gateway.jvm.org.apache.flink.api.java.typeutils.RowTypeInfo
if _is_instance_of(j_type_info, JRowTypeInfo):
j_row_field_names = j_type_info.getFieldNames()
j_row_field_types = j_type_info.getFieldTypes()
row_field_types = [_from_java_type(j_row_field_type) for j_row_field_type in
j_row_field_types]
row_field_names = [field_name for field_name in j_row_field_names]
return Types.ROW_NAMED(row_field_names, row_field_types)
JTupleTypeInfo = gateway.jvm.org.apache.flink.api.java.typeutils.TupleTypeInfo
if _is_instance_of(j_type_info, JTupleTypeInfo):
j_field_types = []
for i in range(j_type_info.getArity()):
j_field_types.append(j_type_info.getTypeAt(i))
field_types = [_from_java_type(j_field_type) for j_field_type in j_field_types]
return TupleTypeInfo(field_types)
JMapTypeInfo = get_gateway().jvm.org.apache.flink.api.java.typeutils.MapTypeInfo
if _is_instance_of(j_type_info, JMapTypeInfo):
j_key_type_info = j_type_info.getKeyTypeInfo()
j_value_type_info = j_type_info.getValueTypeInfo()
return MapTypeInfo(_from_java_type(j_key_type_info), _from_java_type(j_value_type_info))
JListTypeInfo = get_gateway().jvm.org.apache.flink.api.java.typeutils.ListTypeInfo
if _is_instance_of(j_type_info, JListTypeInfo):
j_element_type_info = j_type_info.getElementTypeInfo()
return ListTypeInfo(_from_java_type(j_element_type_info))
JLocalTimeTypeInfo = gateway.jvm.org.apache.flink.api.common.typeinfo.LocalTimeTypeInfo
if _is_instance_of(j_type_info, JLocalTimeTypeInfo):
if j_type_info.equals(JLocalTimeTypeInfo.LOCAL_DATE):
time_type = LocalTimeTypeInfo.TimeType.LOCAL_DATE
elif j_type_info.equals(JLocalTimeTypeInfo.LOCAL_TIME):
time_type = LocalTimeTypeInfo.TimeType.LOCAL_TIME
elif j_type_info.equals(JLocalTimeTypeInfo.LOCAL_DATE_TIME):
time_type = LocalTimeTypeInfo.TimeType.LOCAL_DATE_TIME
else:
raise TypeError("Unsupported LocalTimeTypeInfo: %s." % j_type_info.toString())
return LocalTimeTypeInfo(time_type)
JExternalTypeInfo = gateway.jvm.org.apache.flink.table.runtime.typeutils.ExternalTypeInfo
if _is_instance_of(j_type_info, JExternalTypeInfo):
TypeInfoDataTypeConverter = \
gateway.jvm.org.apache.flink.table.types.utils.LegacyTypeInfoDataTypeConverter
return ExternalTypeInfo(_from_java_type(
TypeInfoDataTypeConverter.toLegacyTypeInfo(j_type_info.getDataType())))
raise TypeError("The java type info: %s is not supported in PyFlink currently." % j_type_info)
def _is_instance_of(java_object: JavaObject, java_type: Union[JavaObject, JavaClass]) -> bool:
if isinstance(java_type, JavaObject):
return java_object.equals(java_type)
elif isinstance(java_type, JavaClass):
return java_object.getClass().isAssignableFrom(java_type._java_lang_class)
return False
| 43,933 | 37.983141 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/common/job_client.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.completable_future import CompletableFuture
from pyflink.common.job_execution_result import JobExecutionResult
from pyflink.common.job_id import JobID
from pyflink.common.job_status import JobStatus
__all__ = ['JobClient']
class JobClient(object):
"""
A client that is scoped to a specific job.
.. versionadded:: 1.11.0
"""
def __init__(self, j_job_client):
self._j_job_client = j_job_client
def get_job_id(self) -> JobID:
"""
Returns the JobID that uniquely identifies the job this client is scoped to.
:return: JobID, or null if the job has been executed on a runtime without JobIDs
or if the execution failed.
.. versionadded:: 1.11.0
"""
return JobID(self._j_job_client.getJobID())
def get_job_status(self) -> CompletableFuture:
"""
Requests the JobStatus of the associated job.
:return: A CompletableFuture containing the JobStatus of the associated job.
.. versionadded:: 1.11.0
"""
return CompletableFuture(self._j_job_client.getJobStatus(), JobStatus._from_j_job_status)
def cancel(self) -> CompletableFuture:
"""
Cancels the associated job.
:return: A CompletableFuture for canceling the associated job.
.. versionadded:: 1.11.0
"""
return CompletableFuture(self._j_job_client.cancel())
def stop_with_savepoint(self, advance_to_end_of_event_time: bool,
savepoint_directory: str = None) -> CompletableFuture:
"""
Stops the associated job on Flink cluster.
Stopping works only for streaming programs. Be aware, that the job might continue to run
for a while after sending the stop command, because after sources stopped to emit data all
operators need to finish processing.
:param advance_to_end_of_event_time: Flag indicating if the source should inject a
MAX_WATERMARK in the pipeline.
:param savepoint_directory: Directory the savepoint should be written to.
:return: A CompletableFuture containing the path where the savepoint is located.
.. versionadded:: 1.11.0
"""
return CompletableFuture(
self._j_job_client.stopWithSavepoint(advance_to_end_of_event_time, savepoint_directory),
str)
def trigger_savepoint(self, savepoint_directory: str = None) -> CompletableFuture:
"""
Triggers a savepoint for the associated job. The savepoint will be written to the given
savepoint directory.
:param savepoint_directory: Directory the savepoint should be written to.
:return: A CompletableFuture containing the path where the savepoint is located.
.. versionadded:: 1.11.0
"""
return CompletableFuture(self._j_job_client.triggerSavepoint(savepoint_directory), str)
def get_accumulators(self) -> CompletableFuture:
"""
Requests the accumulators of the associated job. Accumulators can be requested while it
is running or after it has finished. The class loader is used to deserialize the incoming
accumulator results.
:param class_loader: Class loader used to deserialize the incoming accumulator results.
:return: A CompletableFuture containing the accumulators of the associated job.
.. versionadded:: 1.11.0
"""
return CompletableFuture(self._j_job_client.getAccumulators(), dict)
def get_job_execution_result(self) -> CompletableFuture:
"""
Returns the JobExecutionResult result of the job execution of the submitted job.
:return: A CompletableFuture containing the JobExecutionResult result of the job execution.
.. versionadded:: 1.11.0
"""
return CompletableFuture(self._j_job_client.getJobExecutionResult(),
JobExecutionResult)
| 4,961 | 39.672131 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/common/job_execution_result.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Dict, Any
from pyflink.common.job_id import JobID
__all__ = ['JobExecutionResult']
class JobExecutionResult(object):
"""
The result of a job execution. Gives access to the execution time of the job,
and to all accumulators created by this job.
.. versionadded:: 1.11.0
"""
def __init__(self, j_job_execution_result):
self._j_job_execution_result = j_job_execution_result
def get_job_id(self) -> JobID:
"""
Returns the JobID assigned to the job by the Flink runtime.
:return: JobID, or null if the job has been executed on a runtime without JobIDs
or if the execution failed.
.. versionadded:: 1.11.0
"""
return JobID(self._j_job_execution_result.getJobID())
def get_net_runtime(self) -> int:
"""
Gets the net execution time of the job, i.e., the execution time in the parallel system,
without the pre-flight steps like the optimizer.
:return: The net execution time in milliseconds.
.. versionadded:: 1.11.0
"""
return self._j_job_execution_result.getNetRuntime()
def get_accumulator_result(self, accumulator_name: str):
"""
Gets the accumulator with the given name. Returns None, if no accumulator with
that name was produced.
:param accumulator_name: The name of the accumulator.
:return: The value of the accumulator with the given name.
.. versionadded:: 1.11.0
"""
return self.get_all_accumulator_results().get(accumulator_name)
def get_all_accumulator_results(self) -> Dict[str, Any]:
"""
Gets all accumulators produced by the job. The map contains the accumulators as
mappings from the accumulator name to the accumulator value.
:return: The dict which the keys are names of the accumulator and the values
are values of the accumulator produced by the job.
.. versionadded:: 1.11.0
"""
j_result_map = self._j_job_execution_result.getAllAccumulatorResults()
accumulators = {}
for key in j_result_map:
accumulators[key] = j_result_map[key]
return accumulators
def __str__(self):
"""
Convert JobExecutionResult to a string, if possible.
.. versionadded:: 1.11.0
"""
return self._j_job_execution_result.toString()
| 3,400 | 35.569892 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/common/io.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.utils import JavaObjectWrapper
__all__ = [
'InputFormat'
]
class InputFormat(JavaObjectWrapper):
"""
The Python wrapper of Java InputFormat interface, which is the base interface for data sources
that produce records.
"""
def __init__(self, j_input_format):
super().__init__(j_input_format)
| 1,305 | 38.575758 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/common/restart_strategy.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABCMeta
from datetime import timedelta
from typing import Optional
from py4j.java_gateway import get_java_class
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import to_j_flink_time, from_j_flink_time
__all__ = ['RestartStrategies', 'RestartStrategyConfiguration']
class RestartStrategyConfiguration(object, metaclass=ABCMeta):
"""
Abstract configuration for restart strategies.
"""
def __init__(self, j_restart_strategy_configuration):
self._j_restart_strategy_configuration = j_restart_strategy_configuration
def get_description(self) -> str:
"""
Returns a description which is shown in the web interface.
:return: Description of the restart strategy.
"""
return self._j_restart_strategy_configuration.getDescription()
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self._j_restart_strategy_configuration == \
other._j_restart_strategy_configuration
def __hash__(self):
return self._j_restart_strategy_configuration.hashCode()
class RestartStrategies(object):
"""
This class defines methods to generate RestartStrategyConfigurations. These configurations are
used to create RestartStrategies at runtime.
The RestartStrategyConfigurations are used to decouple the core module from the runtime module.
"""
class NoRestartStrategyConfiguration(RestartStrategyConfiguration):
"""
Configuration representing no restart strategy.
"""
def __init__(self, j_restart_strategy=None):
if j_restart_strategy is None:
gateway = get_gateway()
self._j_restart_strategy_configuration = \
gateway.jvm.RestartStrategies.NoRestartStrategyConfiguration()
super(RestartStrategies.NoRestartStrategyConfiguration, self)\
.__init__(self._j_restart_strategy_configuration)
else:
super(RestartStrategies.NoRestartStrategyConfiguration, self) \
.__init__(j_restart_strategy)
class FixedDelayRestartStrategyConfiguration(RestartStrategyConfiguration):
"""
Configuration representing a fixed delay restart strategy.
"""
def __init__(self, restart_attempts=None, delay_between_attempts_interval=None,
j_restart_strategy=None):
if j_restart_strategy is None:
if not isinstance(delay_between_attempts_interval, (timedelta, int)):
raise TypeError("The delay_between_attempts_interval 'failure_interval' "
"only supports integer and datetime.timedelta, current input "
"type is %s." % type(delay_between_attempts_interval))
gateway = get_gateway()
self._j_restart_strategy_configuration = \
gateway.jvm.RestartStrategies\
.fixedDelayRestart(
restart_attempts, to_j_flink_time(delay_between_attempts_interval))
super(RestartStrategies.FixedDelayRestartStrategyConfiguration, self)\
.__init__(self._j_restart_strategy_configuration)
else:
super(RestartStrategies.FixedDelayRestartStrategyConfiguration, self) \
.__init__(j_restart_strategy)
def get_restart_attempts(self) -> int:
return self._j_restart_strategy_configuration.getRestartAttempts()
def get_delay_between_attempts_interval(self) -> timedelta:
return from_j_flink_time(
self._j_restart_strategy_configuration.getDelayBetweenAttemptsInterval())
class FailureRateRestartStrategyConfiguration(RestartStrategyConfiguration):
"""
Configuration representing a failure rate restart strategy.
"""
def __init__(self,
max_failure_rate=None,
failure_interval=None,
delay_between_attempts_interval=None,
j_restart_strategy=None):
if j_restart_strategy is None:
if not isinstance(failure_interval, (timedelta, int)):
raise TypeError("The parameter 'failure_interval' "
"only supports integer and datetime.timedelta, current input "
"type is %s." % type(failure_interval))
if not isinstance(delay_between_attempts_interval, (timedelta, int)):
raise TypeError("The delay_between_attempts_interval 'failure_interval' "
"only supports integer and datetime.timedelta, current input "
"type is %s." % type(delay_between_attempts_interval))
gateway = get_gateway()
self._j_restart_strategy_configuration = \
gateway.jvm.RestartStrategies\
.FailureRateRestartStrategyConfiguration(max_failure_rate,
to_j_flink_time(failure_interval),
to_j_flink_time(
delay_between_attempts_interval))
super(RestartStrategies.FailureRateRestartStrategyConfiguration, self)\
.__init__(self._j_restart_strategy_configuration)
else:
super(RestartStrategies.FailureRateRestartStrategyConfiguration, self)\
.__init__(j_restart_strategy)
def get_max_failure_rate(self) -> int:
return self._j_restart_strategy_configuration.getMaxFailureRate()
def get_failure_interval(self) -> timedelta:
return from_j_flink_time(self._j_restart_strategy_configuration.getFailureInterval())
def get_delay_between_attempts_interval(self) -> timedelta:
return from_j_flink_time(self._j_restart_strategy_configuration
.getDelayBetweenAttemptsInterval())
class FallbackRestartStrategyConfiguration(RestartStrategyConfiguration):
"""
Restart strategy configuration that could be used by jobs to use cluster level restart
strategy. Useful especially when one has a custom implementation of restart strategy set via
flink-conf.yaml.
"""
def __init__(self, j_restart_strategy=None):
if j_restart_strategy is None:
gateway = get_gateway()
self._j_restart_strategy_configuration = \
gateway.jvm.RestartStrategies.FallbackRestartStrategyConfiguration()
super(RestartStrategies.FallbackRestartStrategyConfiguration, self)\
.__init__(self._j_restart_strategy_configuration)
else:
super(RestartStrategies.FallbackRestartStrategyConfiguration, self)\
.__init__(j_restart_strategy)
@staticmethod
def _from_j_restart_strategy(j_restart_strategy) -> Optional[RestartStrategyConfiguration]:
if j_restart_strategy is None:
return None
gateway = get_gateway()
NoRestartStrategyConfiguration = gateway.jvm.RestartStrategies\
.NoRestartStrategyConfiguration
FixedDelayRestartStrategyConfiguration = gateway.jvm.RestartStrategies\
.FixedDelayRestartStrategyConfiguration
FailureRateRestartStrategyConfiguration = gateway.jvm.RestartStrategies\
.FailureRateRestartStrategyConfiguration
FallbackRestartStrategyConfiguration = gateway.jvm.RestartStrategies\
.FallbackRestartStrategyConfiguration
clz = j_restart_strategy.getClass()
if clz.getName() == get_java_class(NoRestartStrategyConfiguration).getName():
return RestartStrategies.NoRestartStrategyConfiguration(
j_restart_strategy=j_restart_strategy)
elif clz.getName() == get_java_class(FixedDelayRestartStrategyConfiguration).getName():
return RestartStrategies.FixedDelayRestartStrategyConfiguration(
j_restart_strategy=j_restart_strategy)
elif clz.getName() == get_java_class(FailureRateRestartStrategyConfiguration).getName():
return RestartStrategies.FailureRateRestartStrategyConfiguration(
j_restart_strategy=j_restart_strategy)
elif clz.getName() == get_java_class(FallbackRestartStrategyConfiguration).getName():
return RestartStrategies.FallbackRestartStrategyConfiguration(
j_restart_strategy=j_restart_strategy)
else:
raise Exception("Unsupported java RestartStrategyConfiguration: %s" % clz.getName())
@staticmethod
def no_restart() -> 'NoRestartStrategyConfiguration':
"""
Generates NoRestartStrategyConfiguration.
:return: The :class:`NoRestartStrategyConfiguration`.
"""
return RestartStrategies.NoRestartStrategyConfiguration()
@staticmethod
def fall_back_restart() -> 'FallbackRestartStrategyConfiguration':
return RestartStrategies.FallbackRestartStrategyConfiguration()
@staticmethod
def fixed_delay_restart(restart_attempts: int, delay_between_attempts: int) -> \
'FixedDelayRestartStrategyConfiguration':
"""
Generates a FixedDelayRestartStrategyConfiguration.
:param restart_attempts: Number of restart attempts for the FixedDelayRestartStrategy.
:param delay_between_attempts: Delay in-between restart attempts for the
FixedDelayRestartStrategy, the input could be integer value
in milliseconds or datetime.timedelta object.
:return: The :class:`FixedDelayRestartStrategyConfiguration`.
"""
return RestartStrategies.FixedDelayRestartStrategyConfiguration(restart_attempts,
delay_between_attempts)
@staticmethod
def failure_rate_restart(failure_rate: int, failure_interval: int, delay_interval: int) -> \
'FailureRateRestartStrategyConfiguration':
"""
Generates a FailureRateRestartStrategyConfiguration.
:param failure_rate: Maximum number of restarts in given interval ``failure_interval``
before failing a job.
:param failure_interval: Time interval for failures, the input could be integer value
in milliseconds or datetime.timedelta object.
:param delay_interval: Delay in-between restart attempts, the input could be integer value
in milliseconds or datetime.timedelta object.
"""
return RestartStrategies.FailureRateRestartStrategyConfiguration(failure_rate,
failure_interval,
delay_interval)
| 12,193 | 49.38843 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/common/serialization.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.utils import JavaObjectWrapper
from pyflink.java_gateway import get_gateway
__all__ = [
'SerializationSchema',
'DeserializationSchema',
'SimpleStringSchema',
'Encoder',
'BulkWriterFactory'
]
class SerializationSchema(object):
"""
Base class for SerializationSchema. The serialization schema describes how to turn a data object
into a different serialized representation. Most data sinks (for example Apache Kafka) require
the data to be handed to them in a specific format (for example as byte strings).
"""
def __init__(self, j_serialization_schema=None):
self._j_serialization_schema = j_serialization_schema
class DeserializationSchema(object):
"""
Base class for DeserializationSchema. The deserialization schema describes how to turn the byte
messages delivered by certain data sources (for example Apache Kafka) into data types (Java/
Scala objects) that are processed by Flink.
In addition, the DeserializationSchema describes the produced type which lets Flink create
internal serializers and structures to handle the type.
"""
def __init__(self, j_deserialization_schema=None):
self._j_deserialization_schema = j_deserialization_schema
class SimpleStringSchema(SerializationSchema, DeserializationSchema):
"""
Very simple serialization/deserialization schema for strings. By default, the serializer uses
'UTF-8' for string/byte conversion.
"""
def __init__(self, charset: str = 'UTF-8'):
gate_way = get_gateway()
j_char_set = gate_way.jvm.java.nio.charset.Charset.forName(charset)
j_simple_string_serialization_schema = gate_way \
.jvm.org.apache.flink.api.common.serialization.SimpleStringSchema(j_char_set)
SerializationSchema.__init__(self,
j_serialization_schema=j_simple_string_serialization_schema)
DeserializationSchema.__init__(
self, j_deserialization_schema=j_simple_string_serialization_schema)
class Encoder(object):
"""
Encoder is used by the file sink to perform the actual writing of the
incoming elements to the files in a bucket.
"""
def __init__(self, j_encoder):
self._j_encoder = j_encoder
@staticmethod
def simple_string_encoder(charset_name: str = "UTF-8") -> 'Encoder':
"""
A simple Encoder that uses toString() on the input elements and writes them to
the output bucket file separated by newline.
"""
j_encoder = get_gateway().jvm.org.apache.flink.api.common.serialization.\
SimpleStringEncoder(charset_name)
return Encoder(j_encoder)
class BulkWriterFactory(JavaObjectWrapper):
"""
The Python wrapper of Java BulkWriter.Factory interface, which is the base interface for data
sinks that write records into files in a bulk manner.
"""
def __init__(self, j_bulk_writer_factory):
super().__init__(j_bulk_writer_factory)
class RowDataBulkWriterFactory(BulkWriterFactory):
"""
A :class:`~BulkWriterFactory` that receives records with RowData type. This is for indicating
that Row record from Python must be first converted to RowData.
"""
def __init__(self, j_bulk_writer_factory, row_type):
super().__init__(j_bulk_writer_factory)
self._row_type = row_type
def get_row_type(self):
return self._row_type
| 4,417 | 38.097345 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/common/tests/test_execution_config.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.common import (ExecutionConfig, RestartStrategies, ExecutionMode, Configuration)
from pyflink.java_gateway import get_gateway
from pyflink.testing.test_case_utils import PyFlinkTestCase
from pyflink.util.java_utils import get_j_env_configuration
class ExecutionConfigTests(PyFlinkTestCase):
def setUp(self):
self.env = StreamExecutionEnvironment.get_execution_environment()
self.execution_config = self.env.get_config()
def test_constant(self):
gateway = get_gateway()
JExecutionConfig = gateway.jvm.org.apache.flink.api.common.ExecutionConfig
self.assertEqual(ExecutionConfig.PARALLELISM_DEFAULT, JExecutionConfig.PARALLELISM_DEFAULT)
self.assertEqual(ExecutionConfig.PARALLELISM_UNKNOWN, JExecutionConfig.PARALLELISM_UNKNOWN)
def test_get_set_closure_cleaner(self):
self.assertTrue(self.execution_config.is_closure_cleaner_enabled())
self.execution_config.disable_closure_cleaner()
self.assertFalse(self.execution_config.is_closure_cleaner_enabled())
self.execution_config.enable_closure_cleaner()
self.assertTrue(self.execution_config.is_closure_cleaner_enabled())
def test_get_set_auto_watermark_interval(self):
self.assertEqual(self.execution_config.get_auto_watermark_interval(), 200)
self.execution_config.set_auto_watermark_interval(1000)
self.assertEqual(self.execution_config.get_auto_watermark_interval(), 1000)
def test_get_set_parallelism(self):
self.execution_config.set_parallelism(8)
self.assertEqual(self.execution_config.get_parallelism(), 8)
self.execution_config.set_parallelism(4)
self.assertEqual(self.execution_config.get_parallelism(), 4)
def test_get_set_max_parallelism(self):
self.execution_config.set_max_parallelism(12)
self.assertEqual(self.execution_config.get_max_parallelism(), 12)
self.execution_config.set_max_parallelism(16)
self.assertEqual(self.execution_config.get_max_parallelism(), 16)
def test_get_set_task_cancellation_interval(self):
self.assertEqual(self.execution_config.get_task_cancellation_interval(), 30000)
self.execution_config.set_task_cancellation_interval(1000)
self.assertEqual(self.execution_config.get_task_cancellation_interval(), 1000)
def test_get_set_task_cancellation_timeout(self):
self.assertEqual(self.execution_config.get_task_cancellation_timeout(), 180000)
self.execution_config.set_task_cancellation_timeout(3000)
self.assertEqual(self.execution_config.get_task_cancellation_timeout(), 3000)
def test_get_set_restart_strategy(self):
self.execution_config.set_restart_strategy(RestartStrategies.no_restart())
self.assertEqual(self.execution_config.get_restart_strategy(),
RestartStrategies.no_restart())
self.execution_config.set_restart_strategy(
RestartStrategies.failure_rate_restart(5, 10000, 5000))
self.assertIsInstance(self.execution_config.get_restart_strategy(),
RestartStrategies.FailureRateRestartStrategyConfiguration)
self.execution_config.set_restart_strategy(RestartStrategies.fixed_delay_restart(4, 10000))
self.assertIsInstance(self.execution_config.get_restart_strategy(),
RestartStrategies.FixedDelayRestartStrategyConfiguration)
self.execution_config.set_restart_strategy(RestartStrategies.fall_back_restart())
self.assertEqual(self.execution_config.get_restart_strategy(),
RestartStrategies.fall_back_restart())
def test_get_set_execution_mode(self):
self.execution_config.set_execution_mode(ExecutionMode.BATCH)
self.assertEqual(self.execution_config.get_execution_mode(), ExecutionMode.BATCH)
self.execution_config.set_execution_mode(ExecutionMode.PIPELINED)
self.assertEqual(self.execution_config.get_execution_mode(), ExecutionMode.PIPELINED)
self.execution_config.set_execution_mode(ExecutionMode.BATCH_FORCED)
self.assertEqual(self.execution_config.get_execution_mode(), ExecutionMode.BATCH_FORCED)
self.execution_config.set_execution_mode(ExecutionMode.PIPELINED_FORCED)
self.assertEqual(self.execution_config.get_execution_mode(), ExecutionMode.PIPELINED_FORCED)
def test_disable_enable_force_kryo(self):
self.execution_config.disable_force_kryo()
self.assertFalse(self.execution_config.is_force_kryo_enabled())
self.execution_config.enable_force_kryo()
self.assertTrue(self.execution_config.is_force_kryo_enabled())
def test_disable_enable_generic_types(self):
self.execution_config.disable_generic_types()
self.assertTrue(self.execution_config.has_generic_types_disabled())
self.execution_config.enable_generic_types()
self.assertFalse(self.execution_config.has_generic_types_disabled())
def test_disable_enable_auto_generated_uids(self):
self.execution_config.disable_auto_generated_uids()
self.assertFalse(self.execution_config.has_auto_generated_uids_enabled())
self.execution_config.enable_auto_generated_uids()
self.assertTrue(self.execution_config.has_auto_generated_uids_enabled())
def test_disable_enable_force_avro(self):
self.execution_config.disable_force_avro()
self.assertFalse(self.execution_config.is_force_avro_enabled())
self.execution_config.enable_force_avro()
self.assertTrue(self.execution_config.is_force_avro_enabled())
def test_disable_enable_object_reuse(self):
self.execution_config.disable_object_reuse()
self.assertFalse(self.execution_config.is_object_reuse_enabled())
self.execution_config.enable_object_reuse()
self.assertTrue(self.execution_config.is_object_reuse_enabled())
def test_get_set_global_job_parameters(self):
self.execution_config.set_global_job_parameters({"hello": "world"})
self.assertEqual(self.execution_config.get_global_job_parameters(), {"hello": "world"})
def test_add_default_kryo_serializer(self):
self.execution_config.add_default_kryo_serializer(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
"org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
class_dict = self.execution_config.get_default_kryo_serializer_classes()
self.assertEqual(class_dict,
{'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
'org.apache.flink.runtime.state'
'.StateBackendTestBase$CustomKryoTestSerializer'})
def test_register_type_with_kryo_serializer(self):
self.execution_config.register_type_with_kryo_serializer(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo",
"org.apache.flink.runtime.state.StateBackendTestBase$CustomKryoTestSerializer")
class_dict = self.execution_config.get_registered_types_with_kryo_serializer_classes()
self.assertEqual(class_dict,
{'org.apache.flink.runtime.state.StateBackendTestBase$TestPojo':
'org.apache.flink.runtime.state'
'.StateBackendTestBase$CustomKryoTestSerializer'})
def test_register_pojo_type(self):
self.execution_config.register_pojo_type(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo")
type_list = self.execution_config.get_registered_pojo_types()
self.assertEqual(type_list,
["org.apache.flink.runtime.state.StateBackendTestBase$TestPojo"])
def test_register_kryo_type(self):
self.execution_config.register_kryo_type(
"org.apache.flink.runtime.state.StateBackendTestBase$TestPojo")
type_list = self.execution_config.get_registered_kryo_types()
self.assertEqual(type_list,
["org.apache.flink.runtime.state.StateBackendTestBase$TestPojo"])
def test_auto_type_registration(self):
self.assertFalse(self.execution_config.is_auto_type_registration_disabled())
self.execution_config.disable_auto_type_registration()
self.assertTrue(self.execution_config.is_auto_type_registration_disabled())
def test_get_set_use_snapshot_compression(self):
self.execution_config.set_use_snapshot_compression(False)
self.assertFalse(self.execution_config.is_use_snapshot_compression())
self.execution_config.set_use_snapshot_compression(True)
self.assertTrue(self.execution_config.is_use_snapshot_compression())
def test_equals_and_hash(self):
config1 = StreamExecutionEnvironment.get_execution_environment().get_config()
config2 = StreamExecutionEnvironment.get_execution_environment().get_config()
self.assertEqual(config1, config2)
self.assertEqual(hash(config1), hash(config2))
config1.set_parallelism(12)
config2.set_parallelism(11)
self.assertNotEqual(config1, config2)
# it is allowed for hashes to be equal even if objects are not
config2.set_parallelism(12)
self.assertEqual(config1, config2)
self.assertEqual(hash(config1), hash(config2))
def test_get_execution_environment_with_config(self):
configuration = Configuration()
configuration.set_integer('parallelism.default', 12)
configuration.set_string('pipeline.name', 'haha')
env = StreamExecutionEnvironment.get_execution_environment(configuration)
execution_config = env.get_config()
self.assertEqual(execution_config.get_parallelism(), 12)
config = Configuration(
j_configuration=get_j_env_configuration(env._j_stream_execution_environment))
self.assertEqual(config.get_string('pipeline.name', ''), 'haha')
| 11,105 | 37.429066 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/common/tests/test_watermark_strategy.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Duration
from pyflink.common.watermark_strategy import WatermarkStrategy
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import is_instance_of, get_field_value
from pyflink.testing.test_case_utils import PyFlinkTestCase
class WatermarkStrategyTests(PyFlinkTestCase):
def test_with_idleness(self):
jvm = get_gateway().jvm
j_watermark_strategy = WatermarkStrategy.no_watermarks().with_idleness(
Duration.of_seconds(5)
)._j_watermark_strategy
self.assertTrue(is_instance_of(
j_watermark_strategy,
jvm.org.apache.flink.api.common.eventtime.WatermarkStrategyWithIdleness
))
self.assertEqual(get_field_value(j_watermark_strategy, "idlenessTimeout").toMillis(), 5000)
def test_with_watermark_alignment(self):
jvm = get_gateway().jvm
j_watermark_strategy = WatermarkStrategy.no_watermarks().with_watermark_alignment(
"alignment-group-1", Duration.of_seconds(20), Duration.of_seconds(10)
)._j_watermark_strategy
self.assertTrue(is_instance_of(
j_watermark_strategy,
jvm.org.apache.flink.api.common.eventtime.WatermarksWithWatermarkAlignment
))
alignment_parameters = j_watermark_strategy.getAlignmentParameters()
self.assertEqual(alignment_parameters.getWatermarkGroup(), "alignment-group-1")
self.assertEqual(alignment_parameters.getMaxAllowedWatermarkDrift(), 20000)
self.assertEqual(alignment_parameters.getUpdateInterval(), 10000)
def test_for_monotonous_timestamps(self):
jvm = get_gateway().jvm
j_watermark_strategy = WatermarkStrategy.for_monotonous_timestamps()._j_watermark_strategy
self.assertTrue(is_instance_of(
j_watermark_strategy.createWatermarkGenerator(None),
jvm.org.apache.flink.api.common.eventtime.AscendingTimestampsWatermarks
))
def test_for_bounded_out_of_orderness(self):
jvm = get_gateway().jvm
j_watermark_strategy = WatermarkStrategy.for_bounded_out_of_orderness(
Duration.of_seconds(3)
)._j_watermark_strategy
j_watermark_generator = j_watermark_strategy.createWatermarkGenerator(None)
self.assertTrue(is_instance_of(
j_watermark_generator,
jvm.org.apache.flink.api.common.eventtime.BoundedOutOfOrdernessWatermarks
))
self.assertEqual(get_field_value(j_watermark_generator, "outOfOrdernessMillis"), 3000)
def test_no_watermarks(self):
jvm = get_gateway().jvm
j_watermark_strategy = WatermarkStrategy.no_watermarks()._j_watermark_strategy
self.assertTrue(is_instance_of(
j_watermark_strategy.createWatermarkGenerator(None),
jvm.org.apache.flink.api.common.eventtime.NoWatermarksGenerator
))
| 3,845 | 47.075 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/common/tests/test_typeinfo.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.typeinfo import Types, RowTypeInfo, TupleTypeInfo, _from_java_type
from pyflink.testing.test_case_utils import PyFlinkTestCase
class TypeInfoTests(PyFlinkTestCase):
def test_row_type(self):
self.assertEqual(RowTypeInfo([Types.STRING(), Types.STRING()])
.get_field_names(), ['f0', 'f1'])
self.assertEqual(RowTypeInfo([Types.STRING(), Types.STRING()],
['a', 'b']).get_field_names(), ['a', 'b'])
self.assertEqual(RowTypeInfo([Types.STRING(), Types.STRING()],
['a', 'b']) == RowTypeInfo([Types.STRING(),
Types.STRING()], ['a', 'b']), True)
self.assertEqual(RowTypeInfo([Types.STRING(),
Types.STRING()],
['a', 'b']) == RowTypeInfo([Types.STRING(),
Types.INT()],
['a', 'b']), False)
self.assertEqual(RowTypeInfo([Types.STRING(),
Types.STRING()],
['a', 'b']).__str__(), "RowTypeInfo(a: String, b: String)")
self.assertEqual(Types.ROW([Types.STRING(), Types.STRING()]),
RowTypeInfo([Types.STRING(), Types.STRING()]), True)
self.assertEqual(Types.ROW_NAMED(['a', 'b'], [Types.STRING(), Types.STRING()])
.get_field_names(), ['a', 'b'], True)
self.assertEqual(Types.ROW_NAMED(['a', 'b'], [Types.STRING(), Types.STRING()])
.get_field_types(), [Types.STRING(), Types.STRING()], True)
def test_tuple_type(self):
self.assertEqual(TupleTypeInfo([Types.STRING(), Types.INT()]),
TupleTypeInfo([Types.STRING(), Types.INT()]), True)
self.assertEqual(TupleTypeInfo([Types.STRING(), Types.INT()]).__str__(),
"TupleTypeInfo(String, Integer)")
self.assertNotEqual(TupleTypeInfo([Types.STRING(), Types.INT()]),
TupleTypeInfo([Types.STRING(), Types.BOOLEAN()]))
self.assertEqual(Types.TUPLE([Types.STRING(), Types.INT()]),
TupleTypeInfo([Types.STRING(), Types.INT()]))
self.assertEqual(Types.TUPLE([Types.STRING(), Types.INT()]).get_field_types(),
[Types.STRING(), Types.INT()])
def test_from_java_type(self):
basic_int_type_info = Types.INT()
self.assertEqual(basic_int_type_info,
_from_java_type(basic_int_type_info.get_java_type_info()))
basic_short_type_info = Types.SHORT()
self.assertEqual(basic_short_type_info,
_from_java_type(basic_short_type_info.get_java_type_info()))
basic_long_type_info = Types.LONG()
self.assertEqual(basic_long_type_info,
_from_java_type(basic_long_type_info.get_java_type_info()))
basic_float_type_info = Types.FLOAT()
self.assertEqual(basic_float_type_info,
_from_java_type(basic_float_type_info.get_java_type_info()))
basic_double_type_info = Types.DOUBLE()
self.assertEqual(basic_double_type_info,
_from_java_type(basic_double_type_info.get_java_type_info()))
basic_char_type_info = Types.CHAR()
self.assertEqual(basic_char_type_info,
_from_java_type(basic_char_type_info.get_java_type_info()))
basic_byte_type_info = Types.BYTE()
self.assertEqual(basic_byte_type_info,
_from_java_type(basic_byte_type_info.get_java_type_info()))
basic_big_int_type_info = Types.BIG_INT()
self.assertEqual(basic_big_int_type_info,
_from_java_type(basic_big_int_type_info.get_java_type_info()))
basic_big_dec_type_info = Types.BIG_DEC()
self.assertEqual(basic_big_dec_type_info,
_from_java_type(basic_big_dec_type_info.get_java_type_info()))
basic_sql_date_type_info = Types.SQL_DATE()
self.assertEqual(basic_sql_date_type_info,
_from_java_type(basic_sql_date_type_info.get_java_type_info()))
basic_sql_time_type_info = Types.SQL_TIME()
self.assertEqual(basic_sql_time_type_info,
_from_java_type(basic_sql_time_type_info.get_java_type_info()))
basic_sql_timestamp_type_info = Types.SQL_TIMESTAMP()
self.assertEqual(basic_sql_timestamp_type_info,
_from_java_type(basic_sql_timestamp_type_info.get_java_type_info()))
row_type_info = Types.ROW([Types.INT(), Types.STRING()])
self.assertEqual(row_type_info, _from_java_type(row_type_info.get_java_type_info()))
tuple_type_info = Types.TUPLE([Types.CHAR(), Types.INT()])
self.assertEqual(tuple_type_info, _from_java_type(tuple_type_info.get_java_type_info()))
primitive_int_array_type_info = Types.PRIMITIVE_ARRAY(Types.INT())
self.assertEqual(primitive_int_array_type_info,
_from_java_type(primitive_int_array_type_info.get_java_type_info()))
object_array_type_info = Types.OBJECT_ARRAY(Types.SQL_DATE())
self.assertEqual(object_array_type_info,
_from_java_type(object_array_type_info.get_java_type_info()))
pickled_byte_array_type_info = Types.PICKLED_BYTE_ARRAY()
self.assertEqual(pickled_byte_array_type_info,
_from_java_type(pickled_byte_array_type_info.get_java_type_info()))
sql_date_type_info = Types.SQL_DATE()
self.assertEqual(sql_date_type_info,
_from_java_type(sql_date_type_info.get_java_type_info()))
map_type_info = Types.MAP(Types.INT(), Types.STRING())
self.assertEqual(map_type_info,
_from_java_type(map_type_info.get_java_type_info()))
list_type_info = Types.LIST(Types.INT())
self.assertEqual(list_type_info,
_from_java_type(list_type_info.get_java_type_info()))
| 7,263 | 48.753425 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/common/tests/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/common/tests/test_serialization_schemas.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common.serialization import SimpleStringSchema
from pyflink.testing.test_case_utils import PyFlinkTestCase
class SimpleStringSchemaTests(PyFlinkTestCase):
def test_simple_string_schema(self):
expected_string = 'test string'
simple_string_schema = SimpleStringSchema()
self.assertEqual(expected_string.encode(encoding='utf-8'),
simple_string_schema._j_serialization_schema.serialize(expected_string))
self.assertEqual(expected_string, simple_string_schema._j_deserialization_schema
.deserialize(expected_string.encode(encoding='utf-8')))
| 1,598 | 48.96875 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/common/tests/test_configuration.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from copy import deepcopy
from pyflink.common import Configuration
from pyflink.testing.test_case_utils import PyFlinkTestCase
class ConfigurationTests(PyFlinkTestCase):
def test_init(self):
conf = Configuration()
self.assertEqual(conf.to_dict(), dict())
conf.set_string("k1", "v1")
conf2 = Configuration(conf)
self.assertEqual(conf2.to_dict(), {"k1": "v1"})
def test_getters_and_setters(self):
conf = Configuration()
conf.set_string("str", "v1")
conf.set_integer("int", 2)
conf.set_boolean("bool", True)
conf.set_float("float", 0.5)
conf.set_bytearray("bytearray", bytearray([1, 2, 3]))
str_value = conf.get_string("str", "")
int_value = conf.get_integer("int", 0)
bool_value = conf.get_boolean("bool", False)
float_value = conf.get_float("float", 0)
bytearray_value = conf.get_bytearray("bytearray", bytearray())
self.assertEqual(str_value, "v1")
self.assertEqual(int_value, 2)
self.assertEqual(bool_value, True)
self.assertEqual(float_value, 0.5)
self.assertEqual(bytearray_value, bytearray([1, 2, 3]))
def test_key_set(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf.set_string("k2", "v2")
conf.set_string("k3", "v3")
key_set = conf.key_set()
self.assertEqual(key_set, {"k1", "k2", "k3"})
def test_add_all_to_dict(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf.set_integer("k2", 1)
conf.set_float("k3", 1.2)
conf.set_boolean("k4", True)
conf.set_bytearray("k5", bytearray([1, 2, 3]))
target_dict = dict()
conf.add_all_to_dict(target_dict)
self.assertEqual(target_dict, {"k1": "v1",
"k2": 1,
"k3": 1.2,
"k4": True,
"k5": bytearray([1, 2, 3])})
def test_add_all(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf2 = Configuration()
conf2.add_all(conf)
value1 = conf2.get_string("k1", "")
self.assertEqual(value1, "v1")
conf2.add_all(conf, "conf_")
value2 = conf2.get_string("conf_k1", "")
self.assertEqual(value2, "v1")
def test_deepcopy(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf2 = deepcopy(conf)
self.assertEqual(conf2, conf)
conf2.set_string("k1", "v2")
self.assertNotEqual(conf2, conf)
def test_contains_key(self):
conf = Configuration()
conf.set_string("k1", "v1")
contains_k1 = conf.contains_key("k1")
contains_k2 = conf.contains_key("k2")
self.assertTrue(contains_k1)
self.assertFalse(contains_k2)
def test_to_dict(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf.set_integer("k2", 1)
conf.set_float("k3", 1.2)
conf.set_boolean("k4", True)
target_dict = conf.to_dict()
self.assertEqual(target_dict, {"k1": "v1", "k2": "1", "k3": "1.2", "k4": "true"})
def test_remove_config(self):
conf = Configuration()
conf.set_string("k1", "v1")
conf.set_integer("k2", 1)
self.assertTrue(conf.contains_key("k1"))
self.assertTrue(conf.contains_key("k2"))
self.assertTrue(conf.remove_config("k1"))
self.assertFalse(conf.remove_config("k1"))
self.assertFalse(conf.contains_key("k1"))
conf.remove_config("k2")
self.assertFalse(conf.contains_key("k2"))
def test_hash_equal_str(self):
conf = Configuration()
conf2 = Configuration()
conf.set_string("k1", "v1")
conf.set_integer("k2", 1)
conf2.set_string("k1", "v1")
self.assertNotEqual(hash(conf), hash(conf2))
self.assertNotEqual(conf, conf2)
conf2.set_integer("k2", 1)
self.assertEqual(hash(conf), hash(conf2))
self.assertEqual(conf, conf2)
self.assertEqual(str(conf), "{k1=v1, k2=1}")
| 5,183 | 30.228916 | 89 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/event_time_timer.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Time, WatermarkStrategy, Duration
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.functions import KeyedProcessFunction, RuntimeContext
from pyflink.datastream.state import ValueStateDescriptor, StateTtlConfig
class Sum(KeyedProcessFunction):
def __init__(self):
self.state = None
def open(self, runtime_context: RuntimeContext):
state_descriptor = ValueStateDescriptor("state", Types.FLOAT())
state_ttl_config = StateTtlConfig \
.new_builder(Time.seconds(1)) \
.set_update_type(StateTtlConfig.UpdateType.OnReadAndWrite) \
.disable_cleanup_in_background() \
.build()
state_descriptor.enable_time_to_live(state_ttl_config)
self.state = runtime_context.get_state(state_descriptor)
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
# retrieve the current count
current = self.state.value()
if current is None:
current = 0
# update the state's count
current += value[2]
self.state.update(current)
# register an event time timer 2 seconds later
ctx.timer_service().register_event_time_timer(ctx.timestamp() + 2000)
def on_timer(self, timestamp: int, ctx: 'KeyedProcessFunction.OnTimerContext'):
yield ctx.get_current_key(), self.state.value()
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp: int) -> int:
return int(value[0])
def event_timer_timer_demo():
env = StreamExecutionEnvironment.get_execution_environment()
ds = env.from_collection(
collection=[
(1000, 'Alice', 110.1),
(4000, 'Bob', 30.2),
(3000, 'Alice', 20.0),
(2000, 'Bob', 53.1),
(5000, 'Alice', 13.1),
(3000, 'Bob', 3.1),
(7000, 'Bob', 16.1),
(10000, 'Alice', 20.1)
],
type_info=Types.TUPLE([Types.LONG(), Types.STRING(), Types.FLOAT()]))
ds = ds.assign_timestamps_and_watermarks(
WatermarkStrategy.for_bounded_out_of_orderness(Duration.of_seconds(2))
.with_timestamp_assigner(MyTimestampAssigner()))
# apply the process function onto a keyed stream
ds.key_by(lambda value: value[1]) \
.process(Sum()) \
.print()
# submit for execution
env.execute()
if __name__ == '__main__':
event_timer_timer_demo()
| 3,576 | 36.260417 | 83 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/basic_operations.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
import logging
import sys
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
def show(ds, env):
ds.print()
env.execute()
def basic_operations():
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
# define the source
ds = env.from_collection(
collection=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
type_info=Types.ROW_NAMED(["id", "info"], [Types.INT(), Types.STRING()])
)
# map
def update_tel(data):
# parse the json
json_data = json.loads(data.info)
json_data['tel'] += 1
return data.id, json.dumps(json_data)
show(ds.map(update_tel), env)
# (1, '{"name": "Flink", "tel": 124, "addr": {"country": "Germany", "city": "Berlin"}}')
# (2, '{"name": "hello", "tel": 136, "addr": {"country": "China", "city": "Shanghai"}}')
# (3, '{"name": "world", "tel": 125, "addr": {"country": "USA", "city": "NewYork"}}')
# (4, '{"name": "PyFlink", "tel": 33, "addr": {"country": "China", "city": "Hangzhou"}}')
# filter
show(ds.filter(lambda data: data.id == 1).map(update_tel), env)
# (1, '{"name": "Flink", "tel": 124, "addr": {"country": "Germany", "city": "Berlin"}}')
# key by
show(ds.map(lambda data: (json.loads(data.info)['addr']['country'],
json.loads(data.info)['tel']))
.key_by(lambda data: data[0]).sum(1), env)
# ('Germany', 123)
# ('China', 135)
# ('USA', 124)
# ('China', 167)
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
basic_operations()
| 3,014 | 38.155844 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/state_access.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import Time
from pyflink.common.typeinfo import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.functions import KeyedProcessFunction, RuntimeContext
from pyflink.datastream.state import ValueStateDescriptor, StateTtlConfig
class Sum(KeyedProcessFunction):
def __init__(self):
self.state = None
def open(self, runtime_context: RuntimeContext):
state_descriptor = ValueStateDescriptor("state", Types.FLOAT())
state_ttl_config = StateTtlConfig \
.new_builder(Time.seconds(1)) \
.set_update_type(StateTtlConfig.UpdateType.OnReadAndWrite) \
.disable_cleanup_in_background() \
.build()
state_descriptor.enable_time_to_live(state_ttl_config)
self.state = runtime_context.get_state(state_descriptor)
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
# retrieve the current count
current = self.state.value()
if current is None:
current = 0
# update the state's count
current += value[1]
self.state.update(current)
yield value[0], current
def state_access_demo():
env = StreamExecutionEnvironment.get_execution_environment()
ds = env.from_collection(
collection=[
('Alice', 110.1),
('Bob', 30.2),
('Alice', 20.0),
('Bob', 53.1),
('Alice', 13.1),
('Bob', 3.1),
('Bob', 16.1),
('Alice', 20.1)
],
type_info=Types.TUPLE([Types.STRING(), Types.FLOAT()]))
# apply the process function onto a keyed stream
ds.key_by(lambda value: value[0]) \
.process(Sum()) \
.print()
# submit for execution
env.execute()
if __name__ == '__main__':
state_access_demo()
| 2,819 | 34.25 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/process_json_data.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
import logging
import sys
from pyflink.datastream import StreamExecutionEnvironment
def process_json_data():
env = StreamExecutionEnvironment.get_execution_environment()
# define the source
ds = env.from_collection(
collection=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')]
)
def update_tel(data):
# parse the json
json_data = json.loads(data[1])
json_data['tel'] += 1
return data[0], json_data
def filter_by_country(data):
# the json data could be accessed directly, there is no need to parse it again using
# json.loads
return "China" in data[1]['addr']['country']
ds.map(update_tel).filter(filter_by_country).print()
# submit for execution
env.execute()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
process_json_data()
| 2,227 | 37.413793 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/word_count.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import argparse
import logging
import sys
from pyflink.common import WatermarkStrategy, Encoder, Types
from pyflink.datastream import StreamExecutionEnvironment, RuntimeExecutionMode
from pyflink.datastream.connectors.file_system import (FileSource, StreamFormat, FileSink,
OutputFileConfig, RollingPolicy)
word_count_data = ["To be, or not to be,--that is the question:--",
"Whether 'tis nobler in the mind to suffer",
"The slings and arrows of outrageous fortune",
"Or to take arms against a sea of troubles,",
"And by opposing end them?--To die,--to sleep,--",
"No more; and by a sleep to say we end",
"The heartache, and the thousand natural shocks",
"That flesh is heir to,--'tis a consummation",
"Devoutly to be wish'd. To die,--to sleep;--",
"To sleep! perchance to dream:--ay, there's the rub;",
"For in that sleep of death what dreams may come,",
"When we have shuffled off this mortal coil,",
"Must give us pause: there's the respect",
"That makes calamity of so long life;",
"For who would bear the whips and scorns of time,",
"The oppressor's wrong, the proud man's contumely,",
"The pangs of despis'd love, the law's delay,",
"The insolence of office, and the spurns",
"That patient merit of the unworthy takes,",
"When he himself might his quietus make",
"With a bare bodkin? who would these fardels bear,",
"To grunt and sweat under a weary life,",
"But that the dread of something after death,--",
"The undiscover'd country, from whose bourn",
"No traveller returns,--puzzles the will,",
"And makes us rather bear those ills we have",
"Than fly to others that we know not of?",
"Thus conscience does make cowards of us all;",
"And thus the native hue of resolution",
"Is sicklied o'er with the pale cast of thought;",
"And enterprises of great pith and moment,",
"With this regard, their currents turn awry,",
"And lose the name of action.--Soft you now!",
"The fair Ophelia!--Nymph, in thy orisons",
"Be all my sins remember'd."]
def word_count(input_path, output_path):
env = StreamExecutionEnvironment.get_execution_environment()
env.set_runtime_mode(RuntimeExecutionMode.BATCH)
# write all the data to one file
env.set_parallelism(1)
# define the source
if input_path is not None:
ds = env.from_source(
source=FileSource.for_record_stream_format(StreamFormat.text_line_format(),
input_path)
.process_static_file_set().build(),
watermark_strategy=WatermarkStrategy.for_monotonous_timestamps(),
source_name="file_source"
)
else:
print("Executing word_count example with default input data set.")
print("Use --input to specify file input.")
ds = env.from_collection(word_count_data)
def split(line):
yield from line.split()
# compute word count
ds = ds.flat_map(split) \
.map(lambda i: (i, 1), output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \
.key_by(lambda i: i[0]) \
.reduce(lambda i, j: (i[0], i[1] + j[1]))
# define the sink
if output_path is not None:
ds.sink_to(
sink=FileSink.for_row_format(
base_path=output_path,
encoder=Encoder.simple_string_encoder())
.with_output_file_config(
OutputFileConfig.builder()
.with_part_prefix("prefix")
.with_part_suffix(".ext")
.build())
.with_rolling_policy(RollingPolicy.default_rolling_policy())
.build()
)
else:
print("Printing result to stdout. Use --output to specify output path.")
ds.print()
# submit for execution
env.execute()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
required=False,
help='Input file to process.')
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
word_count(known_args.input, known_args.output)
| 5,963 | 43.177778 | 91 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/streaming_word_count.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import argparse
import logging
import sys
from pyflink.common import Encoder, Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.connectors.file_system import (FileSink, OutputFileConfig, RollingPolicy)
from pyflink.table import StreamTableEnvironment, TableDescriptor, Schema, DataTypes
words = ["flink", "window", "timer", "event_time", "processing_time", "state",
"connector", "pyflink", "checkpoint", "watermark", "sideoutput", "sql",
"datastream", "broadcast", "asyncio", "catalog", "batch", "streaming"]
max_word_id = len(words) - 1
def word_count(output_path):
env = StreamExecutionEnvironment.get_execution_environment()
t_env = StreamTableEnvironment.create(stream_execution_environment=env)
# define the source
# randomly select 5 words per second from a predefined list
t_env.create_temporary_table(
'source',
TableDescriptor.for_connector('datagen')
.schema(Schema.new_builder()
.column('word_id', DataTypes.INT())
.build())
.option('fields.word_id.kind', 'random')
.option('fields.word_id.min', '0')
.option('fields.word_id.max', str(max_word_id))
.option('rows-per-second', '5')
.build())
table = t_env.from_path('source')
ds = t_env.to_data_stream(table)
def id_to_word(r):
# word_id is the first column of the input row
return words[r[0]]
# compute word count
ds = ds.map(id_to_word) \
.map(lambda i: (i, 1), output_type=Types.TUPLE([Types.STRING(), Types.INT()])) \
.key_by(lambda i: i[0]) \
.reduce(lambda i, j: (i[0], i[1] + j[1]))
# define the sink
if output_path is not None:
ds.sink_to(
sink=FileSink.for_row_format(
base_path=output_path,
encoder=Encoder.simple_string_encoder())
.with_output_file_config(
OutputFileConfig.builder()
.with_part_prefix("prefix")
.with_part_suffix(".ext")
.build())
.with_rolling_policy(RollingPolicy.default_rolling_policy())
.build()
)
else:
print("Printing result to stdout. Use --output to specify output path.")
ds.print()
# submit for execution
env.execute()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
word_count(known_args.output)
| 3,849 | 37.118812 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/windowing/tumbling_count_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
import argparse
from typing import Iterable
from pyflink.datastream.connectors.file_system import FileSink, OutputFileConfig, RollingPolicy
from pyflink.common import Types, Encoder
from pyflink.datastream import StreamExecutionEnvironment, WindowFunction
from pyflink.datastream.window import CountWindow
class SumWindowFunction(WindowFunction[tuple, tuple, str, CountWindow]):
def apply(self, key: str, window: CountWindow, inputs: Iterable[tuple]):
result = 0
for i in inputs:
result += i[0]
return [(key, result)]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
output_path = known_args.output
env = StreamExecutionEnvironment.get_execution_environment()
# write all the data to one file
env.set_parallelism(1)
# define the source
data_stream = env.from_collection([
(1, 'hi'), (2, 'hello'), (3, 'hi'), (4, 'hello'), (5, 'hi'), (6, 'hello'), (6, 'hello')],
type_info=Types.TUPLE([Types.INT(), Types.STRING()]))
ds = data_stream.key_by(lambda x: x[1], key_type=Types.STRING()) \
.count_window(2) \
.apply(SumWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()]))
# define the sink
if output_path is not None:
ds.sink_to(
sink=FileSink.for_row_format(
base_path=output_path,
encoder=Encoder.simple_string_encoder())
.with_output_file_config(
OutputFileConfig.builder()
.with_part_prefix("prefix")
.with_part_suffix(".ext")
.build())
.with_rolling_policy(RollingPolicy.default_rolling_policy())
.build()
)
else:
print("Printing result to stdout. Use --output to specify output path.")
ds.print()
# submit for execution
env.execute()
| 3,052 | 35.783133 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/windowing/sliding_time_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
import argparse
from typing import Iterable
from pyflink.datastream.connectors.file_system import FileSink, OutputFileConfig, RollingPolicy
from pyflink.common import Types, WatermarkStrategy, Time, Encoder
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import StreamExecutionEnvironment, ProcessWindowFunction
from pyflink.datastream.window import SlidingEventTimeWindows, TimeWindow
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[1])
class CountWindowProcessFunction(ProcessWindowFunction[tuple, tuple, str, TimeWindow]):
def process(self,
key: str,
context: ProcessWindowFunction.Context[TimeWindow],
elements: Iterable[tuple]) -> Iterable[tuple]:
return [(key, context.window().start, context.window().end, len([e for e in elements]))]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
output_path = known_args.output
env = StreamExecutionEnvironment.get_execution_environment()
# write all the data to one file
env.set_parallelism(1)
# define the source
data_stream = env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 5), ('hi', 8), ('hi', 9), ('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()]))
# define the watermark strategy
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(MyTimestampAssigner())
ds = data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(SlidingEventTimeWindows.of(Time.milliseconds(5), Time.milliseconds(2))) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.INT(), Types.INT(), Types.INT()]))
# define the sink
if output_path is not None:
ds.sink_to(
sink=FileSink.for_row_format(
base_path=output_path,
encoder=Encoder.simple_string_encoder())
.with_output_file_config(
OutputFileConfig.builder()
.with_part_prefix("prefix")
.with_part_suffix(".ext")
.build())
.with_rolling_policy(RollingPolicy.default_rolling_policy())
.build()
)
else:
print("Printing result to stdout. Use --output to specify output path.")
ds.print()
# submit for execution
env.execute()
| 3,773 | 38.726316 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/windowing/session_with_dynamic_gap_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
import argparse
from typing import Iterable
from pyflink.datastream.connectors.file_system import FileSink, OutputFileConfig, RollingPolicy
from pyflink.common import Types, WatermarkStrategy, Encoder
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import StreamExecutionEnvironment, ProcessWindowFunction
from pyflink.datastream.window import EventTimeSessionWindows, \
SessionWindowTimeGapExtractor, TimeWindow
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[1])
class MySessionWindowTimeGapExtractor(SessionWindowTimeGapExtractor):
def extract(self, element: tuple) -> int:
return element[1]
class CountWindowProcessFunction(ProcessWindowFunction[tuple, tuple, str, TimeWindow]):
def process(self,
key: str,
context: ProcessWindowFunction.Context[TimeWindow],
elements: Iterable[tuple]) -> Iterable[tuple]:
return [(key, context.window().start, context.window().end, len([e for e in elements]))]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
output_path = known_args.output
env = StreamExecutionEnvironment.get_execution_environment()
# write all the data to one file
env.set_parallelism(1)
# define the source
data_stream = env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 8), ('hi', 9), ('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()]))
# define the watermark strategy
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(MyTimestampAssigner())
ds = data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_dynamic_gap(MySessionWindowTimeGapExtractor())) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.INT(), Types.INT(), Types.INT()]))
# define the sink
if output_path is not None:
ds.sink_to(
sink=FileSink.for_row_format(
base_path=output_path,
encoder=Encoder.simple_string_encoder())
.with_output_file_config(
OutputFileConfig.builder()
.with_part_prefix("prefix")
.with_part_suffix(".ext")
.build())
.with_rolling_policy(RollingPolicy.default_rolling_policy())
.build()
)
else:
print("Printing result to stdout. Use --output to specify output path.")
ds.print()
# submit for execution
env.execute()
| 3,942 | 38.039604 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/windowing/session_with_gap_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
import argparse
from typing import Iterable
from pyflink.datastream.connectors.file_system import FileSink, RollingPolicy, OutputFileConfig
from pyflink.common import Types, WatermarkStrategy, Time, Encoder
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import StreamExecutionEnvironment, ProcessWindowFunction
from pyflink.datastream.window import EventTimeSessionWindows, \
SessionWindowTimeGapExtractor, TimeWindow
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[1])
class MySessionWindowTimeGapExtractor(SessionWindowTimeGapExtractor):
def extract(self, element: tuple) -> int:
return element[1]
class CountWindowProcessFunction(ProcessWindowFunction[tuple, tuple, str, TimeWindow]):
def process(self,
key: str,
context: ProcessWindowFunction.Context[TimeWindow],
elements: Iterable[tuple]) -> Iterable[tuple]:
return [(key, context.window().start, context.window().end, len([e for e in elements]))]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
output_path = known_args.output
env = StreamExecutionEnvironment.get_execution_environment()
# write all the data to one file
env.set_parallelism(1)
# define the source
data_stream = env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 8), ('hi', 9), ('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()]))
# define the watermark strategy
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(MyTimestampAssigner())
ds = data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(EventTimeSessionWindows.with_gap(Time.milliseconds(5))) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.INT(), Types.INT(), Types.INT()]))
# define the sink
if output_path is not None:
ds.sink_to(
sink=FileSink.for_row_format(
base_path=output_path,
encoder=Encoder.simple_string_encoder())
.with_output_file_config(
OutputFileConfig.builder()
.with_part_prefix("prefix")
.with_part_suffix(".ext")
.build())
.with_rolling_policy(RollingPolicy.default_rolling_policy())
.build()
)
else:
print("Printing result to stdout. Use --output to specify output path.")
ds.print()
# submit for execution
env.execute()
| 3,927 | 37.891089 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/windowing/tumbling_time_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
import argparse
from typing import Iterable
from pyflink.datastream.connectors.file_system import FileSink, OutputFileConfig, RollingPolicy
from pyflink.common import Types, WatermarkStrategy, Time, Encoder
from pyflink.common.watermark_strategy import TimestampAssigner
from pyflink.datastream import StreamExecutionEnvironment, ProcessWindowFunction
from pyflink.datastream.window import TumblingEventTimeWindows, TimeWindow
class MyTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value, record_timestamp) -> int:
return int(value[1])
class CountWindowProcessFunction(ProcessWindowFunction[tuple, tuple, str, TimeWindow]):
def process(self,
key: str,
context: ProcessWindowFunction.Context[TimeWindow],
elements: Iterable[tuple]) -> Iterable[tuple]:
return [(key, context.window().start, context.window().end, len([e for e in elements]))]
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
output_path = known_args.output
env = StreamExecutionEnvironment.get_execution_environment()
# write all the data to one file
env.set_parallelism(1)
# define the source
data_stream = env.from_collection([
('hi', 1), ('hi', 2), ('hi', 3), ('hi', 4), ('hi', 5), ('hi', 8), ('hi', 9), ('hi', 15)],
type_info=Types.TUPLE([Types.STRING(), Types.INT()]))
# define the watermark strategy
watermark_strategy = WatermarkStrategy.for_monotonous_timestamps() \
.with_timestamp_assigner(MyTimestampAssigner())
ds = data_stream.assign_timestamps_and_watermarks(watermark_strategy) \
.key_by(lambda x: x[0], key_type=Types.STRING()) \
.window(TumblingEventTimeWindows.of(Time.milliseconds(5))) \
.process(CountWindowProcessFunction(),
Types.TUPLE([Types.STRING(), Types.INT(), Types.INT(), Types.INT()]))
# define the sink
if output_path is not None:
ds.sink_to(
sink=FileSink.for_row_format(
base_path=output_path,
encoder=Encoder.simple_string_encoder())
.with_output_file_config(
OutputFileConfig.builder()
.with_part_prefix("prefix")
.with_part_suffix(".ext")
.build())
.with_rolling_policy(RollingPolicy.default_rolling_policy())
.build()
)
else:
print("Printing result to stdout. Use --output to specify output path.")
ds.print()
# submit for execution
env.execute()
| 3,753 | 38.515789 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/connectors/pulsar.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common import SimpleStringSchema, WatermarkStrategy
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.connectors.pulsar import PulsarSource, PulsarSink, StartCursor, \
StopCursor, DeliveryGuarantee, TopicRoutingMode
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
PULSAR_SQL_CONNECTOR_PATH = 'file:///path/to/flink-sql-connector-pulsar-1.16.0.jar'
SERVICE_URL = 'pulsar://localhost:6650'
ADMIN_URL = 'http://localhost:8080'
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
env.add_jars(PULSAR_SQL_CONNECTOR_PATH)
pulsar_source = PulsarSource.builder() \
.set_service_url(SERVICE_URL) \
.set_admin_url(ADMIN_URL) \
.set_topics('ada') \
.set_start_cursor(StartCursor.latest()) \
.set_unbounded_stop_cursor(StopCursor.never()) \
.set_subscription_name('pyflink_subscription') \
.set_deserialization_schema(SimpleStringSchema()) \
.set_config('pulsar.source.enableAutoAcknowledgeMessage', True) \
.set_properties({'pulsar.source.autoCommitCursorInterval': '1000'}) \
.build()
ds = env.from_source(source=pulsar_source,
watermark_strategy=WatermarkStrategy.for_monotonous_timestamps(),
source_name="pulsar source")
pulsar_sink = PulsarSink.builder() \
.set_service_url(SERVICE_URL) \
.set_admin_url(ADMIN_URL) \
.set_producer_name('pyflink_producer') \
.set_topics('beta') \
.set_serialization_schema(SimpleStringSchema()) \
.set_delivery_guarantee(DeliveryGuarantee.AT_LEAST_ONCE) \
.set_topic_routing_mode(TopicRoutingMode.ROUND_ROBIN) \
.set_config('pulsar.producer.maxPendingMessages', 1000) \
.set_properties({'pulsar.producer.batchingMaxMessages': '100'}) \
.build()
ds.sink_to(pulsar_sink).name('pulsar sink')
env.execute()
| 3,022 | 42.811594 | 90 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/connectors/kafka_avro_format.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.connectors.kafka import FlinkKafkaProducer, FlinkKafkaConsumer
from pyflink.datastream.formats.avro import AvroRowSerializationSchema, AvroRowDeserializationSchema
# Make sure that the Kafka cluster is started and the topic 'test_avro_topic' is
# created before executing this job.
def write_to_kafka(env):
ds = env.from_collection([
(1, 'hi'), (2, 'hello'), (3, 'hi'), (4, 'hello'), (5, 'hi'), (6, 'hello'), (6, 'hello')],
type_info=Types.ROW([Types.INT(), Types.STRING()]))
serialization_schema = AvroRowSerializationSchema(
avro_schema_string="""
{
"type": "record",
"name": "TestRecord",
"fields": [
{"name": "id", "type": "int"},
{"name": "name", "type": "string"}
]
}"""
)
kafka_producer = FlinkKafkaProducer(
topic='test_avro_topic',
serialization_schema=serialization_schema,
producer_config={'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
)
# note that the output type of ds must be RowTypeInfo
ds.add_sink(kafka_producer)
env.execute()
def read_from_kafka(env):
deserialization_schema = AvroRowDeserializationSchema(
avro_schema_string="""
{
"type": "record",
"name": "TestRecord",
"fields": [
{"name": "id", "type": "int"},
{"name": "name", "type": "string"}
]
}"""
)
kafka_consumer = FlinkKafkaConsumer(
topics='test_avro_topic',
deserialization_schema=deserialization_schema,
properties={'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group_1'}
)
kafka_consumer.set_start_from_earliest()
env.add_source(kafka_consumer).print()
env.execute()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
env = StreamExecutionEnvironment.get_execution_environment()
env.add_jars("file:///path/to/flink-sql-avro-1.15.0.jar",
"file:///path/to/flink-sql-connector-kafka-1.15.0.jar")
print("start writing data to kafka")
write_to_kafka(env)
print("start reading data from kafka")
read_from_kafka(env)
| 3,437 | 35.967742 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/connectors/elasticsearch.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.datastream.connectors.elasticsearch import Elasticsearch6SinkBuilder, \
Elasticsearch7SinkBuilder, FlushBackoffType, ElasticsearchEmitter
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.connectors import DeliveryGuarantee
def write_to_es6(env):
ELASTICSEARCH_SQL_CONNECTOR_PATH = \
'file:///path/to/flink-sql-connector-elasticsearch6-1.16.0.jar'
env.add_jars(ELASTICSEARCH_SQL_CONNECTOR_PATH)
ds = env.from_collection(
[{'name': 'ada', 'id': '1'}, {'name': 'luna', 'id': '2'}],
type_info=Types.MAP(Types.STRING(), Types.STRING()))
es_sink = Elasticsearch6SinkBuilder() \
.set_emitter(ElasticsearchEmitter.static_index('foo', 'id', 'bar')) \
.set_hosts(['localhost:9200']) \
.set_delivery_guarantee(DeliveryGuarantee.AT_LEAST_ONCE) \
.set_bulk_flush_max_actions(1) \
.set_bulk_flush_max_size_mb(2) \
.set_bulk_flush_interval(1000) \
.set_bulk_flush_backoff_strategy(FlushBackoffType.CONSTANT, 3, 3000) \
.set_connection_username('foo') \
.set_connection_password('bar') \
.set_connection_path_prefix('foo-bar') \
.set_connection_request_timeout(30000) \
.set_connection_timeout(31000) \
.set_socket_timeout(32000) \
.build()
ds.sink_to(es_sink).name('es6 sink')
env.execute()
def write_to_es6_dynamic_index(env):
ELASTICSEARCH_SQL_CONNECTOR_PATH = \
'file:///path/to/flink-sql-connector-elasticsearch6-1.16.0.jar'
env.add_jars(ELASTICSEARCH_SQL_CONNECTOR_PATH)
ds = env.from_collection(
[{'name': 'ada', 'id': '1'}, {'name': 'luna', 'id': '2'}],
type_info=Types.MAP(Types.STRING(), Types.STRING()))
es_sink = Elasticsearch6SinkBuilder() \
.set_emitter(ElasticsearchEmitter.dynamic_index('name', 'id', 'bar')) \
.set_hosts(['localhost:9200']) \
.build()
ds.sink_to(es_sink).name('es6 dynamic index sink')
env.execute()
def write_to_es7(env):
ELASTICSEARCH_SQL_CONNECTOR_PATH = \
'file:///path/to/flink-sql-connector-elasticsearch7-1.16.0.jar'
env.add_jars(ELASTICSEARCH_SQL_CONNECTOR_PATH)
ds = env.from_collection(
[{'name': 'ada', 'id': '1'}, {'name': 'luna', 'id': '2'}],
type_info=Types.MAP(Types.STRING(), Types.STRING()))
es7_sink = Elasticsearch7SinkBuilder() \
.set_emitter(ElasticsearchEmitter.static_index('foo', 'id')) \
.set_hosts(['localhost:9200']) \
.set_delivery_guarantee(DeliveryGuarantee.AT_LEAST_ONCE) \
.set_bulk_flush_max_actions(1) \
.set_bulk_flush_max_size_mb(2) \
.set_bulk_flush_interval(1000) \
.set_bulk_flush_backoff_strategy(FlushBackoffType.CONSTANT, 3, 3000) \
.set_connection_username('foo') \
.set_connection_password('bar') \
.set_connection_path_prefix('foo-bar') \
.set_connection_request_timeout(30000) \
.set_connection_timeout(31000) \
.set_socket_timeout(32000) \
.build()
ds.sink_to(es7_sink).name('es7 sink')
env.execute()
def write_to_es7_dynamic_index(env):
ELASTICSEARCH_SQL_CONNECTOR_PATH = \
'file:///path/to/flink-sql-connector-elasticsearch7-1.16.0.jar'
env.add_jars(ELASTICSEARCH_SQL_CONNECTOR_PATH)
ds = env.from_collection(
[{'name': 'ada', 'id': '1'}, {'name': 'luna', 'id': '2'}],
type_info=Types.MAP(Types.STRING(), Types.STRING()))
es7_sink = Elasticsearch7SinkBuilder() \
.set_emitter(ElasticsearchEmitter.dynamic_index('name', 'id')) \
.set_hosts(['localhost:9200']) \
.build()
ds.sink_to(es7_sink).name('es7 dynamic index sink')
env.execute()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
print("start writing data to elasticsearch6")
write_to_es6(env)
write_to_es6_dynamic_index(env)
print("start writing data to elasticsearch7")
write_to_es7(env)
write_to_es7_dynamic_index(env)
| 5,186 | 35.787234 | 84 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/connectors/kafka_csv_format.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.connectors.kafka import FlinkKafkaProducer, FlinkKafkaConsumer
from pyflink.datastream.formats.csv import CsvRowSerializationSchema
from pyflink.datastream.formats.json import JsonRowDeserializationSchema
# Make sure that the Kafka cluster is started and the topic 'test_csv_topic' is
# created before executing this job.
def write_to_kafka(env):
type_info = Types.ROW([Types.INT(), Types.STRING()])
ds = env.from_collection([
(1, 'hi'), (2, 'hello'), (3, 'hi'), (4, 'hello'), (5, 'hi'), (6, 'hello'), (6, 'hello')],
type_info=type_info)
serialization_schema = CsvRowSerializationSchema.Builder(type_info).build()
kafka_producer = FlinkKafkaProducer(
topic='test_csv_topic',
serialization_schema=serialization_schema,
producer_config={'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
)
# note that the output type of ds must be RowTypeInfo
ds.add_sink(kafka_producer)
env.execute()
def read_from_kafka(env):
deserialization_schema = JsonRowDeserializationSchema.Builder() \
.type_info(Types.ROW([Types.INT(), Types.STRING()])) \
.build()
kafka_consumer = FlinkKafkaConsumer(
topics='test_csv_topic',
deserialization_schema=deserialization_schema,
properties={'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group_1'}
)
kafka_consumer.set_start_from_earliest()
env.add_source(kafka_consumer).print()
env.execute()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
env = StreamExecutionEnvironment.get_execution_environment()
env.add_jars("file:///path/to/flink-sql-connector-kafka-1.15.0.jar")
print("start writing data to kafka")
write_to_kafka(env)
print("start reading data from kafka")
read_from_kafka(env)
| 2,970 | 38.613333 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/datastream/connectors/kafka_json_format.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.datastream.connectors.kafka import FlinkKafkaProducer, FlinkKafkaConsumer
from pyflink.datastream.formats.json import JsonRowSerializationSchema, JsonRowDeserializationSchema
# Make sure that the Kafka cluster is started and the topic 'test_json_topic' is
# created before executing this job.
def write_to_kafka(env):
type_info = Types.ROW([Types.INT(), Types.STRING()])
ds = env.from_collection(
[(1, 'hi'), (2, 'hello'), (3, 'hi'), (4, 'hello'), (5, 'hi'), (6, 'hello'), (6, 'hello')],
type_info=type_info)
serialization_schema = JsonRowSerializationSchema.Builder() \
.with_type_info(type_info) \
.build()
kafka_producer = FlinkKafkaProducer(
topic='test_json_topic',
serialization_schema=serialization_schema,
producer_config={'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group'}
)
# note that the output type of ds must be RowTypeInfo
ds.add_sink(kafka_producer)
env.execute()
def read_from_kafka(env):
deserialization_schema = JsonRowDeserializationSchema.Builder() \
.type_info(Types.ROW([Types.INT(), Types.STRING()])) \
.build()
kafka_consumer = FlinkKafkaConsumer(
topics='test_json_topic',
deserialization_schema=deserialization_schema,
properties={'bootstrap.servers': 'localhost:9092', 'group.id': 'test_group_1'}
)
kafka_consumer.set_start_from_earliest()
env.add_source(kafka_consumer).print()
env.execute()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
env = StreamExecutionEnvironment.get_execution_environment()
env.add_jars("file:///path/to/flink-sql-connector-kafka-1.15.0.jar")
print("start writing data to kafka")
write_to_kafka(env)
print("start reading data from kafka")
read_from_kafka(env)
| 2,971 | 38.626667 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/mixing_use_of_datastream_and_table.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import (DataTypes, TableDescriptor, Schema, StreamTableEnvironment)
from pyflink.table.expressions import col
from pyflink.table.udf import udf
def mixing_use_of_datastream_and_table():
# use StreamTableEnvironment instead of TableEnvironment when mixing use of table & datastream
env = StreamExecutionEnvironment.get_execution_environment()
t_env = StreamTableEnvironment.create(stream_execution_environment=env)
# define the source
t_env.create_temporary_table(
'source',
TableDescriptor.for_connector('datagen')
.schema(Schema.new_builder()
.column('id', DataTypes.BIGINT())
.column('data', DataTypes.STRING())
.build())
.option("number-of-rows", "10")
.build())
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('a', DataTypes.BIGINT())
.build())
.build())
@udf(result_type=DataTypes.BIGINT())
def length(data):
return len(data)
# perform table api operations
table = t_env.from_path("source")
table = table.select(col('id'), length(col('data')))
# convert table to datastream and perform datastream api operations
ds = t_env.to_data_stream(table)
ds = ds.map(lambda i: i[0] + i[1], output_type=Types.LONG())
# convert datastream to table and perform table api operations as you want
table = t_env.from_data_stream(
ds,
Schema.new_builder().column("f0", DataTypes.BIGINT()).build())
# execute
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
mixing_use_of_datastream_and_table()
| 3,315 | 39.439024 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/basic_operations.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
import logging
import sys
from pyflink.common import Row
from pyflink.table import (DataTypes, TableEnvironment, EnvironmentSettings, ExplainDetail)
from pyflink.table.expressions import *
from pyflink.table.udf import udtf, udf, udaf, AggregateFunction, TableAggregateFunction, udtaf
def basic_operations():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
table = t_env.from_elements(
elements=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
schema=['id', 'data'])
right_table = t_env.from_elements(elements=[(1, 18), (2, 30), (3, 25), (4, 10)],
schema=['id', 'age'])
table = table.add_columns(
col('data').json_value('$.name', DataTypes.STRING()).alias('name'),
col('data').json_value('$.tel', DataTypes.STRING()).alias('tel'),
col('data').json_value('$.addr.country', DataTypes.STRING()).alias('country')) \
.drop_columns(col('data'))
table.execute().print()
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | op | id | name | tel | country |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | +I | 1 | Flink | 123 | Germany |
# | +I | 2 | hello | 135 | China |
# | +I | 3 | world | 124 | USA |
# | +I | 4 | PyFlink | 32 | China |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# limit the number of outputs
table.limit(3).execute().print()
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | op | id | name | tel | country |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | +I | 1 | Flink | 123 | Germany |
# | +I | 2 | hello | 135 | China |
# | +I | 3 | world | 124 | USA |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# filter
table.filter(col('id') != 3).execute().print()
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | op | id | name | tel | country |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | +I | 1 | Flink | 123 | Germany |
# | +I | 2 | hello | 135 | China |
# | +I | 4 | PyFlink | 32 | China |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# aggregation
table.group_by(col('country')) \
.select(col('country'), col('id').count, col('tel').cast(DataTypes.BIGINT()).max) \
.execute().print()
# +----+--------------------------------+----------------------+----------------------+
# | op | country | EXPR$0 | EXPR$1 |
# +----+--------------------------------+----------------------+----------------------+
# | +I | Germany | 1 | 123 |
# | +I | USA | 1 | 124 |
# | +I | China | 1 | 135 |
# | -U | China | 1 | 135 |
# | +U | China | 2 | 135 |
# +----+--------------------------------+----------------------+----------------------+
# distinct
table.select(col('country')).distinct() \
.execute().print()
# +----+--------------------------------+
# | op | country |
# +----+--------------------------------+
# | +I | Germany |
# | +I | China |
# | +I | USA |
# +----+--------------------------------+
# join
# Note that it still doesn't support duplicate column names between the joined tables
table.join(right_table.rename_columns(col('id').alias('r_id')), col('id') == col('r_id')) \
.execute().print()
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+----------------------+----------------------+
# | op | id | name | tel | country | r_id | age |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+----------------------+----------------------+
# | +I | 4 | PyFlink | 32 | China | 4 | 10 |
# | +I | 1 | Flink | 123 | Germany | 1 | 18 |
# | +I | 2 | hello | 135 | China | 2 | 30 |
# | +I | 3 | world | 124 | USA | 3 | 25 |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+----------------------+----------------------+
# join lateral
@udtf(result_types=[DataTypes.STRING()])
def split(r: Row):
for s in r.name.split("i"):
yield s
table.join_lateral(split.alias('a')) \
.execute().print()
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
# | op | id | name | tel | country | a |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
# | +I | 1 | Flink | 123 | Germany | Fl |
# | +I | 1 | Flink | 123 | Germany | nk |
# | +I | 2 | hello | 135 | China | hello |
# | +I | 3 | world | 124 | USA | world |
# | +I | 4 | PyFlink | 32 | China | PyFl |
# | +I | 4 | PyFlink | 32 | China | nk |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
# show schema
table.print_schema()
# (
# `id` BIGINT,
# `name` STRING,
# `tel` STRING,
# `country` STRING
# )
# show execute plan
print(table.join_lateral(split.alias('a')).explain())
# == Abstract Syntax Tree ==
# LogicalCorrelate(correlation=[$cor1], joinType=[inner], requiredColumns=[{}])
# :- LogicalProject(id=[$0], name=[JSON_VALUE($1, _UTF-16LE'$.name', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR))], tel=[JSON_VALUE($1, _UTF-16LE'$.tel', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR))], country=[JSON_VALUE($1, _UTF-16LE'$.addr.country', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR))])
# : +- LogicalTableScan(table=[[default_catalog, default_database, Unregistered_TableSource_249535355, source: [PythonInputFormatTableSource(id, data)]]])
# +- LogicalTableFunctionScan(invocation=[*org.apache.flink.table.functions.python.PythonTableFunction$1f0568d1f39bef59b4c969a5d620ba46*($0, $1, $2, $3)], rowType=[RecordType(VARCHAR(2147483647) a)], elementType=[class [Ljava.lang.Object;])
#
# == Optimized Physical Plan ==
# PythonCorrelate(invocation=[*org.apache.flink.table.functions.python.PythonTableFunction$1f0568d1f39bef59b4c969a5d620ba46*($0, $1, $2, $3)], correlate=[table(split(id,name,tel,country))], select=[id,name,tel,country,a], rowType=[RecordType(BIGINT id, VARCHAR(2147483647) name, VARCHAR(2147483647) tel, VARCHAR(2147483647) country, VARCHAR(2147483647) a)], joinType=[INNER])
# +- Calc(select=[id, JSON_VALUE(data, _UTF-16LE'$.name', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR)) AS name, JSON_VALUE(data, _UTF-16LE'$.tel', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR)) AS tel, JSON_VALUE(data, _UTF-16LE'$.addr.country', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR)) AS country])
# +- LegacyTableSourceScan(table=[[default_catalog, default_database, Unregistered_TableSource_249535355, source: [PythonInputFormatTableSource(id, data)]]], fields=[id, data])
#
# == Optimized Execution Plan ==
# PythonCorrelate(invocation=[*org.apache.flink.table.functions.python.PythonTableFunction$1f0568d1f39bef59b4c969a5d620ba46*($0, $1, $2, $3)], correlate=[table(split(id,name,tel,country))], select=[id,name,tel,country,a], rowType=[RecordType(BIGINT id, VARCHAR(2147483647) name, VARCHAR(2147483647) tel, VARCHAR(2147483647) country, VARCHAR(2147483647) a)], joinType=[INNER])
# +- Calc(select=[id, JSON_VALUE(data, '$.name', NULL, ON EMPTY, NULL, ON ERROR) AS name, JSON_VALUE(data, '$.tel', NULL, ON EMPTY, NULL, ON ERROR) AS tel, JSON_VALUE(data, '$.addr.country', NULL, ON EMPTY, NULL, ON ERROR) AS country])
# +- LegacyTableSourceScan(table=[[default_catalog, default_database, Unregistered_TableSource_249535355, source: [PythonInputFormatTableSource(id, data)]]], fields=[id, data])
# show execute plan with advice
print(table.join_lateral(split.alias('a')).explain(ExplainDetail.PLAN_ADVICE))
# == Abstract Syntax Tree ==
# LogicalCorrelate(correlation=[$cor2], joinType=[inner], requiredColumns=[{}])
# :- LogicalProject(id=[$0], name=[JSON_VALUE($1, _UTF-16LE'$.name', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR))], tel=[JSON_VALUE($1, _UTF-16LE'$.tel', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR))], country=[JSON_VALUE($1, _UTF-16LE'$.addr.country', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR))])
# : +- LogicalTableScan(table=[[*anonymous_python-input-format$1*]])
# +- LogicalTableFunctionScan(invocation=[*org.apache.flink.table.functions.python.PythonTableFunction$720258394f6a31d31376164d23142f53*($0, $1, $2, $3)], rowType=[RecordType(VARCHAR(2147483647) a)])
#
# == Optimized Physical Plan With Advice ==
# PythonCorrelate(invocation=[*org.apache.flink.table.functions.python.PythonTableFunction$720258394f6a31d31376164d23142f53*($0, $1, $2, $3)], correlate=[table(*org.apache.flink.table.functions.python.PythonTableFunction$720258394f6a31d31376164d23142f53*(id,name,tel,country))], select=[id,name,tel,country,a], rowType=[RecordType(BIGINT id, VARCHAR(2147483647) name, VARCHAR(2147483647) tel, VARCHAR(2147483647) country, VARCHAR(2147483647) a)], joinType=[INNER])
# +- Calc(select=[id, JSON_VALUE(data, _UTF-16LE'$.name', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR)) AS name, JSON_VALUE(data, _UTF-16LE'$.tel', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR)) AS tel, JSON_VALUE(data, _UTF-16LE'$.addr.country', FLAG(NULL), FLAG(ON EMPTY), FLAG(NULL), FLAG(ON ERROR)) AS country])
# +- TableSourceScan(table=[[*anonymous_python-input-format$1*]], fields=[id, data])
#
# No available advice...
#
# == Optimized Execution Plan ==
# PythonCorrelate(invocation=[*org.apache.flink.table.functions.python.PythonTableFunction$720258394f6a31d31376164d23142f53*($0, $1, $2, $3)], correlate=[table(*org.apache.flink.table.functions.python.PythonTableFunction$720258394f6a31d31376164d23142f53*(id,name,tel,country))], select=[id,name,tel,country,a], rowType=[RecordType(BIGINT id, VARCHAR(2147483647) name, VARCHAR(2147483647) tel, VARCHAR(2147483647) country, VARCHAR(2147483647) a)], joinType=[INNER])
# +- Calc(select=[id, JSON_VALUE(data, '$.name', NULL, ON EMPTY, NULL, ON ERROR) AS name, JSON_VALUE(data, '$.tel', NULL, ON EMPTY, NULL, ON ERROR) AS tel, JSON_VALUE(data, '$.addr.country', NULL, ON EMPTY, NULL, ON ERROR) AS country])
# +- TableSourceScan(table=[[*anonymous_python-input-format$1*]], fields=[id, data])
def sql_operations():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
table = t_env.from_elements(
elements=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
schema=['id', 'data'])
t_env.sql_query("SELECT * FROM %s" % table) \
.execute().print()
# +----+----------------------+--------------------------------+
# | op | id | data |
# +----+----------------------+--------------------------------+
# | +I | 1 | {"name": "Flink", "tel": 12... |
# | +I | 2 | {"name": "hello", "tel": 13... |
# | +I | 3 | {"name": "world", "tel": 12... |
# | +I | 4 | {"name": "PyFlink", "tel": ... |
# +----+----------------------+--------------------------------+
# execute sql statement
@udtf(result_types=[DataTypes.STRING(), DataTypes.INT(), DataTypes.STRING()])
def parse_data(data: str):
json_data = json.loads(data)
yield json_data['name'], json_data['tel'], json_data['addr']['country']
t_env.create_temporary_function('parse_data', parse_data)
t_env.execute_sql(
"""
SELECT *
FROM %s, LATERAL TABLE(parse_data(`data`)) t(name, tel, country)
""" % table
).print()
# +----+----------------------+--------------------------------+--------------------------------+-------------+--------------------------------+
# | op | id | data | name | tel | country |
# +----+----------------------+--------------------------------+--------------------------------+-------------+--------------------------------+
# | +I | 1 | {"name": "Flink", "tel": 12... | Flink | 123 | Germany |
# | +I | 2 | {"name": "hello", "tel": 13... | hello | 135 | China |
# | +I | 3 | {"name": "world", "tel": 12... | world | 124 | USA |
# | +I | 4 | {"name": "PyFlink", "tel": ... | PyFlink | 32 | China |
# +----+----------------------+--------------------------------+--------------------------------+-------------+--------------------------------+
# explain sql plan
print(t_env.explain_sql(
"""
SELECT *
FROM %s, LATERAL TABLE(parse_data(`data`)) t(name, tel, country)
""" % table
))
# == Abstract Syntax Tree ==
# LogicalProject(id=[$0], data=[$1], name=[$2], tel=[$3], country=[$4])
# +- LogicalCorrelate(correlation=[$cor1], joinType=[inner], requiredColumns=[{1}])
# :- LogicalTableScan(table=[[default_catalog, default_database, Unregistered_TableSource_734856049, source: [PythonInputFormatTableSource(id, data)]]])
# +- LogicalTableFunctionScan(invocation=[parse_data($cor1.data)], rowType=[RecordType:peek_no_expand(VARCHAR(2147483647) f0, INTEGER f1, VARCHAR(2147483647) f2)])
#
# == Optimized Physical Plan ==
# PythonCorrelate(invocation=[parse_data($1)], correlate=[table(parse_data(data))], select=[id,data,f0,f1,f2], rowType=[RecordType(BIGINT id, VARCHAR(2147483647) data, VARCHAR(2147483647) f0, INTEGER f1, VARCHAR(2147483647) f2)], joinType=[INNER])
# +- LegacyTableSourceScan(table=[[default_catalog, default_database, Unregistered_TableSource_734856049, source: [PythonInputFormatTableSource(id, data)]]], fields=[id, data])
#
# == Optimized Execution Plan ==
# PythonCorrelate(invocation=[parse_data($1)], correlate=[table(parse_data(data))], select=[id,data,f0,f1,f2], rowType=[RecordType(BIGINT id, VARCHAR(2147483647) data, VARCHAR(2147483647) f0, INTEGER f1, VARCHAR(2147483647) f2)], joinType=[INNER])
# +- LegacyTableSourceScan(table=[[default_catalog, default_database, Unregistered_TableSource_734856049, source: [PythonInputFormatTableSource(id, data)]]], fields=[id, data])
# explain sql plan with advice
print(t_env.explain_sql(
"""
SELECT *
FROM %s, LATERAL TABLE(parse_data(`data`)) t(name, tel, country)
""" % table, ExplainDetail.PLAN_ADVICE
))
# == Abstract Syntax Tree ==
# LogicalProject(id=[$0], data=[$1], name=[$2], tel=[$3], country=[$4])
# +- LogicalCorrelate(correlation=[$cor1], joinType=[inner], requiredColumns=[{1}])
# :- LogicalTableScan(table=[[*anonymous_python-input-format$10*]])
# +- LogicalTableFunctionScan(invocation=[parse_data($cor2.data)], rowType=[RecordType:peek_no_expand(VARCHAR(2147483647) f0, INTEGER f1, VARCHAR(2147483647) f2)])
#
# == Optimized Physical Plan With Advice ==
# PythonCorrelate(invocation=[parse_data($1)], correlate=[table(parse_data(data))], select=[id,data,f0,f1,f2], rowType=[RecordType(BIGINT id, VARCHAR(2147483647) data, VARCHAR(2147483647) f0, INTEGER f1, VARCHAR(2147483647) f2)], joinType=[INNER])
# +- TableSourceScan(table=[[*anonymous_python-input-format$10*]], fields=[id, data])
#
# No available advice...
#
# == Optimized Execution Plan ==
# PythonCorrelate(invocation=[parse_data($1)], correlate=[table(parse_data(data))], select=[id,data,f0,f1,f2], rowType=[RecordType(BIGINT id, VARCHAR(2147483647) data, VARCHAR(2147483647) f0, INTEGER f1, VARCHAR(2147483647) f2)], joinType=[INNER])
# +- TableSourceScan(table=[[*anonymous_python-input-format$10*]], fields=[id, data])
def column_operations():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
table = t_env.from_elements(
elements=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
schema=['id', 'data'])
# add columns
table = table.add_columns(
col('data').json_value('$.name', DataTypes.STRING()).alias('name'),
col('data').json_value('$.tel', DataTypes.STRING()).alias('tel'),
col('data').json_value('$.addr.country', DataTypes.STRING()).alias('country'))
table.execute().print()
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
# | op | id | data | name | tel | country |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
# | +I | 1 | {"name": "Flink", "tel": 12... | Flink | 123 | Germany |
# | +I | 2 | {"name": "hello", "tel": 13... | hello | 135 | China |
# | +I | 3 | {"name": "world", "tel": 12... | world | 124 | USA |
# | +I | 4 | {"name": "PyFlink", "tel": ... | PyFlink | 32 | China |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
# drop columns
table = table.drop_columns(col('data'))
table.execute().print()
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | op | id | name | tel | country |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | +I | 1 | Flink | 123 | Germany |
# | +I | 2 | hello | 135 | China |
# | +I | 3 | world | 124 | USA |
# | +I | 4 | PyFlink | 32 | China |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# rename columns
table = table.rename_columns(col('tel').alias('telephone'))
table.execute().print()
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | op | id | name | telephone | country |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# | +I | 1 | Flink | 123 | Germany |
# | +I | 2 | hello | 135 | China |
# | +I | 3 | world | 124 | USA |
# | +I | 4 | PyFlink | 32 | China |
# +----+----------------------+--------------------------------+--------------------------------+--------------------------------+
# replace columns
table = table.add_or_replace_columns(
concat(col('id').cast(DataTypes.STRING()), '_', col('name')).alias('id'))
table.execute().print()
# +----+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
# | op | id | name | telephone | country |
# +----+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
# | +I | 1_Flink | Flink | 123 | Germany |
# | +I | 2_hello | hello | 135 | China |
# | +I | 3_world | world | 124 | USA |
# | +I | 4_PyFlink | PyFlink | 32 | China |
# +----+--------------------------------+--------------------------------+--------------------------------+--------------------------------+
def row_operations():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
table = t_env.from_elements(
elements=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "China", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
schema=['id', 'data'])
# map operation
@udf(result_type=DataTypes.ROW([DataTypes.FIELD("id", DataTypes.BIGINT()),
DataTypes.FIELD("country", DataTypes.STRING())]))
def extract_country(input_row: Row):
data = json.loads(input_row.data)
return Row(input_row.id, data['addr']['country'])
table.map(extract_country) \
.execute().print()
# +----+----------------------+--------------------------------+
# | op | id | country |
# +----+----------------------+--------------------------------+
# | +I | 1 | Germany |
# | +I | 2 | China |
# | +I | 3 | China |
# | +I | 4 | China |
# +----+----------------------+--------------------------------+
# flat_map operation
@udtf(result_types=[DataTypes.BIGINT(), DataTypes.STRING()])
def extract_city(input_row: Row):
data = json.loads(input_row.data)
yield input_row.id, data['addr']['city']
table.flat_map(extract_city) \
.execute().print()
# +----+----------------------+--------------------------------+
# | op | f0 | f1 |
# +----+----------------------+--------------------------------+
# | +I | 1 | Berlin |
# | +I | 2 | Shanghai |
# | +I | 3 | NewYork |
# | +I | 4 | Hangzhou |
# +----+----------------------+--------------------------------+
# aggregate operation
class CountAndSumAggregateFunction(AggregateFunction):
def get_value(self, accumulator):
return Row(accumulator[0], accumulator[1])
def create_accumulator(self):
return Row(0, 0)
def accumulate(self, accumulator, input_row):
accumulator[0] += 1
accumulator[1] += int(input_row.tel)
def retract(self, accumulator, input_row):
accumulator[0] -= 1
accumulator[1] -= int(input_row.tel)
def merge(self, accumulator, accumulators):
for other_acc in accumulators:
accumulator[0] += other_acc[0]
accumulator[1] += other_acc[1]
def get_accumulator_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("cnt", DataTypes.BIGINT()),
DataTypes.FIELD("sum", DataTypes.BIGINT())])
def get_result_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("cnt", DataTypes.BIGINT()),
DataTypes.FIELD("sum", DataTypes.BIGINT())])
count_sum = udaf(CountAndSumAggregateFunction())
table.add_columns(
col('data').json_value('$.name', DataTypes.STRING()).alias('name'),
col('data').json_value('$.tel', DataTypes.STRING()).alias('tel'),
col('data').json_value('$.addr.country', DataTypes.STRING()).alias('country')) \
.group_by(col('country')) \
.aggregate(count_sum.alias("cnt", "sum")) \
.select(col('country'), col('cnt'), col('sum')) \
.execute().print()
# +----+--------------------------------+----------------------+----------------------+
# | op | country | cnt | sum |
# +----+--------------------------------+----------------------+----------------------+
# | +I | China | 3 | 291 |
# | +I | Germany | 1 | 123 |
# +----+--------------------------------+----------------------+----------------------+
# flat_aggregate operation
class Top2(TableAggregateFunction):
def emit_value(self, accumulator):
for v in accumulator:
if v:
yield Row(v)
def create_accumulator(self):
return [None, None]
def accumulate(self, accumulator, input_row):
tel = int(input_row.tel)
if accumulator[0] is None or tel > accumulator[0]:
accumulator[1] = accumulator[0]
accumulator[0] = tel
elif accumulator[1] is None or tel > accumulator[1]:
accumulator[1] = tel
def get_accumulator_type(self):
return DataTypes.ARRAY(DataTypes.BIGINT())
def get_result_type(self):
return DataTypes.ROW(
[DataTypes.FIELD("tel", DataTypes.BIGINT())])
top2 = udtaf(Top2())
table.add_columns(
col('data').json_value('$.name', DataTypes.STRING()).alias('name'),
col('data').json_value('$.tel', DataTypes.STRING()).alias('tel'),
col('data').json_value('$.addr.country', DataTypes.STRING()).alias('country')) \
.group_by(col('country')) \
.flat_aggregate(top2) \
.select(col('country'), col('tel')) \
.execute().print()
# +----+--------------------------------+----------------------+
# | op | country | tel |
# +----+--------------------------------+----------------------+
# | +I | China | 135 |
# | +I | China | 124 |
# | +I | Germany | 123 |
# +----+--------------------------------+----------------------+
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
basic_operations()
sql_operations()
column_operations()
row_operations()
| 34,632 | 70.11499 | 468 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/process_json_data_with_udf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
import logging
import sys
from pyflink.table import (EnvironmentSettings, TableEnvironment, DataTypes, TableDescriptor,
Schema)
from pyflink.table.expressions import col
from pyflink.table.udf import udf
def process_json_data_with_udf():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
table = t_env.from_elements(
elements=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
schema=['id', 'data'])
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('id', DataTypes.BIGINT())
.column('data', DataTypes.STRING())
.build())
.build())
# update json columns
@udf(result_type=DataTypes.STRING())
def update_tel(data):
json_data = json.loads(data)
json_data['tel'] += 1
return json.dumps(json_data)
table = table.select(col('id'), update_tel(col('data')))
# execute
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
process_json_data_with_udf()
| 2,910 | 39.430556 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/process_json_data.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.table import (EnvironmentSettings, TableEnvironment, DataTypes, TableDescriptor,
Schema)
from pyflink.table.expressions import col
def process_json_data():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
table = t_env.from_elements(
elements=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
schema=['id', 'data'])
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('id', DataTypes.BIGINT())
.column('data', DataTypes.STRING())
.build())
.build())
table = table.select(col('id'), col('data').json_value('$.addr.country', DataTypes.STRING()))
# execute
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
process_json_data()
| 2,685 | 41.634921 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/multi_sink.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.table import (EnvironmentSettings, TableEnvironment, DataTypes)
from pyflink.table.udf import udf
def multi_sink():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
table = t_env.from_elements(
elements=[(1, 'Hello'), (2, 'World'), (3, "Flink"), (4, "PyFlink")],
schema=['id', 'data'])
# define the sink tables
t_env.execute_sql("""
CREATE TABLE first_sink (
id BIGINT,
data VARCHAR
) WITH (
'connector' = 'print'
)
""")
t_env.execute_sql("""
CREATE TABLE second_sink (
id BIGINT,
data VARCHAR
) WITH (
'connector' = 'print'
)
""")
# create a statement set
statement_set = t_env.create_statement_set()
# emit the data with id <= 3 to the "first_sink" via sql statement
statement_set.add_insert_sql("INSERT INTO first_sink SELECT * FROM %s WHERE id <= 3" % table)
# emit the data which contains "Flink" to the "second_sink"
@udf(result_type=DataTypes.BOOLEAN())
def contains_flink(data):
return "Flink" in data
second_table = table.where(contains_flink(table.data))
statement_set.add_insert("second_sink", second_table)
# execute the statement set
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
statement_set.execute().wait()
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
multi_sink()
| 2,684 | 34.328947 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/word_count.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import argparse
import logging
import sys
from pyflink.common import Row
from pyflink.table import (EnvironmentSettings, TableEnvironment, TableDescriptor, Schema,
DataTypes, FormatDescriptor)
from pyflink.table.expressions import lit, col
from pyflink.table.udf import udtf
word_count_data = ["To be, or not to be,--that is the question:--",
"Whether 'tis nobler in the mind to suffer",
"The slings and arrows of outrageous fortune",
"Or to take arms against a sea of troubles,",
"And by opposing end them?--To die,--to sleep,--",
"No more; and by a sleep to say we end",
"The heartache, and the thousand natural shocks",
"That flesh is heir to,--'tis a consummation",
"Devoutly to be wish'd. To die,--to sleep;--",
"To sleep! perchance to dream:--ay, there's the rub;",
"For in that sleep of death what dreams may come,",
"When we have shuffled off this mortal coil,",
"Must give us pause: there's the respect",
"That makes calamity of so long life;",
"For who would bear the whips and scorns of time,",
"The oppressor's wrong, the proud man's contumely,",
"The pangs of despis'd love, the law's delay,",
"The insolence of office, and the spurns",
"That patient merit of the unworthy takes,",
"When he himself might his quietus make",
"With a bare bodkin? who would these fardels bear,",
"To grunt and sweat under a weary life,",
"But that the dread of something after death,--",
"The undiscover'd country, from whose bourn",
"No traveller returns,--puzzles the will,",
"And makes us rather bear those ills we have",
"Than fly to others that we know not of?",
"Thus conscience does make cowards of us all;",
"And thus the native hue of resolution",
"Is sicklied o'er with the pale cast of thought;",
"And enterprises of great pith and moment,",
"With this regard, their currents turn awry,",
"And lose the name of action.--Soft you now!",
"The fair Ophelia!--Nymph, in thy orisons",
"Be all my sins remember'd."]
def word_count(input_path, output_path):
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# write all the data to one file
t_env.get_config().set("parallelism.default", "1")
# define the source
if input_path is not None:
t_env.create_temporary_table(
'source',
TableDescriptor.for_connector('filesystem')
.schema(Schema.new_builder()
.column('word', DataTypes.STRING())
.build())
.option('path', input_path)
.format('csv')
.build())
tab = t_env.from_path('source')
else:
print("Executing word_count example with default input data set.")
print("Use --input to specify file input.")
tab = t_env.from_elements(map(lambda i: (i,), word_count_data),
DataTypes.ROW([DataTypes.FIELD('line', DataTypes.STRING())]))
# define the sink
if output_path is not None:
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('filesystem')
.schema(Schema.new_builder()
.column('word', DataTypes.STRING())
.column('count', DataTypes.BIGINT())
.build())
.option('path', output_path)
.format(FormatDescriptor.for_format('canal-json')
.build())
.build())
else:
print("Printing result to stdout. Use --output to specify output path.")
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('word', DataTypes.STRING())
.column('count', DataTypes.BIGINT())
.build())
.build())
@udtf(result_types=[DataTypes.STRING()])
def split(line: Row):
for s in line[0].split():
yield Row(s)
# compute word count
tab.flat_map(split).alias('word') \
.group_by(col('word')) \
.select(col('word'), lit(1).count) \
.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
dest='input',
required=False,
help='Input file to process.')
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
word_count(known_args.input, known_args.output)
| 6,791 | 45.204082 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/streaming_word_count.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import argparse
import logging
import sys
from pyflink.table import TableEnvironment, EnvironmentSettings, TableDescriptor, Schema,\
DataTypes, FormatDescriptor
from pyflink.table.expressions import col, lit
from pyflink.table.udf import udf
words = ["flink", "window", "timer", "event_time", "processing_time", "state",
"connector", "pyflink", "checkpoint", "watermark", "sideoutput", "sql",
"datastream", "broadcast", "asyncio", "catalog", "batch", "streaming"]
max_word_id = len(words) - 1
def streaming_word_count(output_path):
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
# randomly select 5 words per second from a predefined list
t_env.create_temporary_table(
'source',
TableDescriptor.for_connector('datagen')
.schema(Schema.new_builder()
.column('word_id', DataTypes.INT())
.build())
.option('fields.word_id.kind', 'random')
.option('fields.word_id.min', '0')
.option('fields.word_id.max', str(max_word_id))
.option('rows-per-second', '5')
.build())
tab = t_env.from_path('source')
# define the sink
if output_path is not None:
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('filesystem')
.schema(Schema.new_builder()
.column('word', DataTypes.STRING())
.column('count', DataTypes.BIGINT())
.build())
.option('path', output_path)
.format(FormatDescriptor.for_format('canal-json')
.build())
.build())
else:
print("Printing result to stdout. Use --output to specify output path.")
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('word', DataTypes.STRING())
.column('count', DataTypes.BIGINT())
.build())
.build())
@udf(result_type='string')
def id_to_word(word_id):
return words[word_id]
# compute word count
tab.select(id_to_word(col('word_id'))).alias('word') \
.group_by(col('word')) \
.select(col('word'), lit(1).count) \
.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
parser = argparse.ArgumentParser()
parser.add_argument(
'--output',
dest='output',
required=False,
help='Output file to write results to.')
argv = sys.argv[1:]
known_args, _ = parser.parse_known_args(argv)
streaming_word_count(known_args.output)
| 4,311 | 40.066667 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/windowing/over_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common.time import Instant
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import (DataTypes, TableDescriptor, Schema, StreamTableEnvironment)
from pyflink.table.expressions import col, row_interval, CURRENT_ROW
from pyflink.table.window import Over
def tumble_window_demo():
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
t_env = StreamTableEnvironment.create(stream_execution_environment=env)
# define the source with watermark definition
ds = env.from_collection(
collection=[
(Instant.of_epoch_milli(1000), 'Alice', 110.1),
(Instant.of_epoch_milli(4000), 'Bob', 30.2),
(Instant.of_epoch_milli(3000), 'Alice', 20.0),
(Instant.of_epoch_milli(2000), 'Bob', 53.1),
(Instant.of_epoch_milli(5000), 'Alice', 13.1),
(Instant.of_epoch_milli(3000), 'Bob', 3.1),
(Instant.of_epoch_milli(7000), 'Bob', 16.1),
(Instant.of_epoch_milli(10000), 'Alice', 20.1)
],
type_info=Types.ROW([Types.INSTANT(), Types.STRING(), Types.FLOAT()]))
table = t_env.from_data_stream(
ds,
Schema.new_builder()
.column_by_expression("ts", "CAST(f0 AS TIMESTAMP(3))")
.column("f1", DataTypes.STRING())
.column("f2", DataTypes.FLOAT())
.watermark("ts", "ts - INTERVAL '3' SECOND")
.build()
).alias("ts", "name", "price")
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('name', DataTypes.STRING())
.column('total_price', DataTypes.FLOAT())
.build())
.build())
# define the over window operation
table = table.over_window(
Over.partition_by(col("name"))
.order_by(col("ts"))
.preceding(row_interval(2))
.following(CURRENT_ROW)
.alias('w')) \
.select(col('name'), col('price').max.over(col('w')))
# submit for execution
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
tumble_window_demo()
| 3,663 | 39.711111 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/windowing/tumble_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common.time import Instant
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import (DataTypes, TableDescriptor, Schema, StreamTableEnvironment)
from pyflink.table.expressions import lit, col
from pyflink.table.window import Tumble
def tumble_window_demo():
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
t_env = StreamTableEnvironment.create(stream_execution_environment=env)
# define the source with watermark definition
ds = env.from_collection(
collection=[
(Instant.of_epoch_milli(1000), 'Alice', 110.1),
(Instant.of_epoch_milli(4000), 'Bob', 30.2),
(Instant.of_epoch_milli(3000), 'Alice', 20.0),
(Instant.of_epoch_milli(2000), 'Bob', 53.1),
(Instant.of_epoch_milli(5000), 'Alice', 13.1),
(Instant.of_epoch_milli(3000), 'Bob', 3.1),
(Instant.of_epoch_milli(7000), 'Bob', 16.1),
(Instant.of_epoch_milli(10000), 'Alice', 20.1)
],
type_info=Types.ROW([Types.INSTANT(), Types.STRING(), Types.FLOAT()]))
table = t_env.from_data_stream(
ds,
Schema.new_builder()
.column_by_expression("ts", "CAST(f0 AS TIMESTAMP(3))")
.column("f1", DataTypes.STRING())
.column("f2", DataTypes.FLOAT())
.watermark("ts", "ts - INTERVAL '3' SECOND")
.build()
).alias("ts", "name", "price")
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('name', DataTypes.STRING())
.column('total_price', DataTypes.FLOAT())
.column('w_start', DataTypes.TIMESTAMP_LTZ())
.column('w_end', DataTypes.TIMESTAMP_LTZ())
.build())
.build())
# define the tumble window operation
table = table.window(Tumble.over(lit(5).seconds).on(col("ts")).alias("w")) \
.group_by(col('name'), col('w')) \
.select(col('name'), col('price').sum, col("w").start, col("w").end)
# submit for execution
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
tumble_window_demo()
| 3,748 | 41.602273 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/windowing/sliding_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common.time import Instant
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import (DataTypes, TableDescriptor, Schema, StreamTableEnvironment)
from pyflink.table.expressions import lit, col
from pyflink.table.window import Slide
def sliding_window_demo():
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
t_env = StreamTableEnvironment.create(stream_execution_environment=env)
# define the source with watermark definition
ds = env.from_collection(
collection=[
(Instant.of_epoch_milli(1000), 'Alice', 110.1),
(Instant.of_epoch_milli(4000), 'Bob', 30.2),
(Instant.of_epoch_milli(3000), 'Alice', 20.0),
(Instant.of_epoch_milli(2000), 'Bob', 53.1),
(Instant.of_epoch_milli(5000), 'Alice', 13.1),
(Instant.of_epoch_milli(3000), 'Bob', 3.1),
(Instant.of_epoch_milli(7000), 'Bob', 16.1),
(Instant.of_epoch_milli(10000), 'Alice', 20.1)
],
type_info=Types.ROW([Types.INSTANT(), Types.STRING(), Types.FLOAT()]))
table = t_env.from_data_stream(
ds,
Schema.new_builder()
.column_by_expression("ts", "CAST(f0 AS TIMESTAMP(3))")
.column("f1", DataTypes.STRING())
.column("f2", DataTypes.FLOAT())
.watermark("ts", "ts - INTERVAL '3' SECOND")
.build()
).alias("ts", "name", "price")
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('name', DataTypes.STRING())
.column('total_price', DataTypes.FLOAT())
.column('w_start', DataTypes.TIMESTAMP_LTZ())
.column('w_end', DataTypes.TIMESTAMP_LTZ())
.build())
.build())
# define the sliding window operation
table = table.window(Slide.over(lit(5).seconds).every(lit(2).seconds).on(col("ts")).alias("w"))\
.group_by(col('name'), col('w')) \
.select(col('name'), col('price').sum, col("w").start, col("w").end)
# submit for execution
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
sliding_window_demo()
| 3,770 | 41.852273 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/windowing/session_window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common.time import Instant
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import (DataTypes, TableDescriptor, Schema, StreamTableEnvironment)
from pyflink.table.expressions import lit, col
from pyflink.table.window import Session
def session_window_demo():
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
t_env = StreamTableEnvironment.create(stream_execution_environment=env)
# define the source with watermark definition
ds = env.from_collection(
collection=[
(Instant.of_epoch_milli(1000), 'Alice', 110.1),
(Instant.of_epoch_milli(4000), 'Bob', 30.2),
(Instant.of_epoch_milli(3000), 'Alice', 20.0),
(Instant.of_epoch_milli(2000), 'Bob', 53.1),
(Instant.of_epoch_milli(8000), 'Bob', 16.1),
(Instant.of_epoch_milli(10000), 'Alice', 20.1)
],
type_info=Types.ROW([Types.INSTANT(), Types.STRING(), Types.FLOAT()]))
table = t_env.from_data_stream(
ds,
Schema.new_builder()
.column_by_expression("ts", "CAST(f0 AS TIMESTAMP(3))")
.column("f1", DataTypes.STRING())
.column("f2", DataTypes.FLOAT())
.watermark("ts", "ts - INTERVAL '3' SECOND")
.build()
).alias("ts", "name", "price")
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('name', DataTypes.STRING())
.column('total_price', DataTypes.FLOAT())
.column('w_start', DataTypes.TIMESTAMP_LTZ())
.column('w_end', DataTypes.TIMESTAMP_LTZ())
.build())
.build())
# define the session window operation
table = table.window(Session.with_gap(lit(5).seconds).on(col("ts")).alias("w")) \
.group_by(col('name'), col('w')) \
.select(col('name'), col('price').sum, col("w").start, col("w").end)
# submit for execution
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
session_window_demo()
| 3,642 | 41.360465 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/pandas/conversion_from_dataframe.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
import pandas as pd
import numpy as np
from pyflink.table import (DataTypes, TableEnvironment, EnvironmentSettings)
def conversion_from_dataframe():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
t_env.get_config().set("parallelism.default", "1")
# define the source with watermark definition
pdf = pd.DataFrame(np.random.rand(1000, 2))
table = t_env.from_pandas(
pdf,
schema=DataTypes.ROW([DataTypes.FIELD("a", DataTypes.DOUBLE()),
DataTypes.FIELD("b", DataTypes.DOUBLE())]))
print(table.to_pandas())
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
conversion_from_dataframe()
| 1,735 | 37.577778 | 84 |
py
|
flink
|
flink-master/flink-python/pyflink/examples/table/pandas/pandas_udaf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import sys
from pyflink.common.time import Instant
from pyflink.common import Types
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table import (DataTypes, TableDescriptor, Schema, StreamTableEnvironment)
from pyflink.table.expressions import lit, col
from pyflink.table.udf import udaf
from pyflink.table.window import Tumble
def pandas_udaf():
env = StreamExecutionEnvironment.get_execution_environment()
env.set_parallelism(1)
t_env = StreamTableEnvironment.create(stream_execution_environment=env)
# define the source with watermark definition
ds = env.from_collection(
collection=[
(Instant.of_epoch_milli(1000), 'Alice', 110.1),
(Instant.of_epoch_milli(4000), 'Bob', 30.2),
(Instant.of_epoch_milli(3000), 'Alice', 20.0),
(Instant.of_epoch_milli(2000), 'Bob', 53.1),
(Instant.of_epoch_milli(5000), 'Alice', 13.1),
(Instant.of_epoch_milli(3000), 'Bob', 3.1),
(Instant.of_epoch_milli(7000), 'Bob', 16.1),
(Instant.of_epoch_milli(10000), 'Alice', 20.1)
],
type_info=Types.ROW([Types.INSTANT(), Types.STRING(), Types.FLOAT()]))
table = t_env.from_data_stream(
ds,
Schema.new_builder()
.column_by_expression("ts", "CAST(f0 AS TIMESTAMP_LTZ(3))")
.column("f1", DataTypes.STRING())
.column("f2", DataTypes.FLOAT())
.watermark("ts", "ts - INTERVAL '3' SECOND")
.build()
).alias("ts", "name", "price")
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('name', DataTypes.STRING())
.column('total_price', DataTypes.FLOAT())
.column('w_start', DataTypes.TIMESTAMP_LTZ())
.column('w_end', DataTypes.TIMESTAMP_LTZ())
.build())
.build())
@udaf(result_type=DataTypes.FLOAT(), func_type="pandas")
def mean_udaf(v):
return v.mean()
# define the tumble window operation
table = table.window(Tumble.over(lit(5).seconds).on(col("ts")).alias("w")) \
.group_by(col('name'), col('w')) \
.select(col('name'), mean_udaf(col('price')), col("w").start, col("w").end)
# submit for execution
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
pandas_udaf()
| 3,888 | 40.817204 | 139 |
py
|
flink
|
flink-master/flink-python/pyflink/table/table_environment.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import atexit
import os
import sys
import tempfile
import warnings
from typing import Union, List, Tuple, Iterable
from py4j.java_gateway import get_java_class, get_method
from pyflink.common.configuration import Configuration
from pyflink.datastream import StreamExecutionEnvironment
from pyflink.table.sources import TableSource
from pyflink.common.typeinfo import TypeInformation
from pyflink.datastream.data_stream import DataStream
from pyflink.java_gateway import get_gateway
from pyflink.serializers import BatchedSerializer, PickleSerializer
from pyflink.table import Table, EnvironmentSettings, Expression, ExplainDetail, \
Module, ModuleEntry, TableSink, Schema, ChangelogMode
from pyflink.table.catalog import Catalog
from pyflink.table.serializers import ArrowSerializer
from pyflink.table.statement_set import StatementSet
from pyflink.table.table_config import TableConfig
from pyflink.table.table_descriptor import TableDescriptor
from pyflink.table.table_result import TableResult
from pyflink.table.types import _create_type_verifier, RowType, DataType, \
_infer_schema_from_data, _create_converter, from_arrow_type, RowField, create_arrow_schema, \
_to_java_data_type
from pyflink.table.udf import UserDefinedFunctionWrapper, AggregateFunction, udaf, \
udtaf, TableAggregateFunction
from pyflink.table.utils import to_expression_jarray
from pyflink.util import java_utils
from pyflink.util.java_utils import get_j_env_configuration, is_local_deployment, load_java_class, \
to_j_explain_detail_arr, to_jarray, get_field
__all__ = [
'StreamTableEnvironment',
'TableEnvironment'
]
class TableEnvironment(object):
"""
A table environment is the base class, entry point, and central context for creating Table
and SQL API programs.
It is unified for bounded and unbounded data processing.
A table environment is responsible for:
- Connecting to external systems.
- Registering and retrieving :class:`~pyflink.table.Table` and other meta objects from a
catalog.
- Executing SQL statements.
- Offering further configuration options.
The path in methods such as :func:`create_temporary_view`
should be a proper SQL identifier. The syntax is following
[[catalog-name.]database-name.]object-name, where the catalog name and database are optional.
For path resolution see :func:`use_catalog` and :func:`use_database`. All keywords or other
special characters need to be escaped.
Example: `cat.1`.`db`.`Table` resolves to an object named 'Table' (table is a reserved
keyword, thus must be escaped) in a catalog named 'cat.1' and database named 'db'.
.. note::
This environment is meant for pure table programs. If you would like to convert from or to
other Flink APIs, it might be necessary to use one of the available language-specific table
environments in the corresponding bridging modules.
"""
def __init__(self, j_tenv, serializer=PickleSerializer()):
self._j_tenv = j_tenv
self._serializer = serializer
# When running in MiniCluster, launch the Python UDF worker using the Python executable
# specified by sys.executable if users have not specified it explicitly via configuration
# python.executable.
self._set_python_executable_for_local_executor()
self._config_chaining_optimization()
self._open()
@staticmethod
def create(environment_settings: Union[EnvironmentSettings, Configuration]) \
-> 'TableEnvironment':
"""
Creates a table environment that is the entry point and central context for creating Table
and SQL API programs.
:param environment_settings: The configuration or environment settings used to instantiate
the :class:`~pyflink.table.TableEnvironment`, the name is for backward compatibility.
:return: The :class:`~pyflink.table.TableEnvironment`.
"""
gateway = get_gateway()
if isinstance(environment_settings, Configuration):
environment_settings = EnvironmentSettings.new_instance() \
.with_configuration(environment_settings).build()
elif not isinstance(environment_settings, EnvironmentSettings):
raise TypeError("argument should be EnvironmentSettings or Configuration")
j_tenv = gateway.jvm.TableEnvironment.create(environment_settings._j_environment_settings)
return TableEnvironment(j_tenv)
def from_table_source(self, table_source: 'TableSource') -> 'Table':
"""
Creates a table from a table source.
Example:
::
>>> csv_table_source = CsvTableSource(
... csv_file_path, ['a', 'b'], [DataTypes.STRING(), DataTypes.BIGINT()])
>>> table_env.from_table_source(csv_table_source)
:param table_source: The table source used as table.
:return: The result table.
"""
warnings.warn("Deprecated in 1.11.", DeprecationWarning)
return Table(self._j_tenv.fromTableSource(table_source._j_table_source), self)
def register_catalog(self, catalog_name: str, catalog: Catalog):
"""
Registers a :class:`~pyflink.table.catalog.Catalog` under a unique name.
All tables registered in the :class:`~pyflink.table.catalog.Catalog` can be accessed.
:param catalog_name: The name under which the catalog will be registered.
:param catalog: The catalog to register.
"""
self._j_tenv.registerCatalog(catalog_name, catalog._j_catalog)
def get_catalog(self, catalog_name: str) -> Catalog:
"""
Gets a registered :class:`~pyflink.table.catalog.Catalog` by name.
:param catalog_name: The name to look up the :class:`~pyflink.table.catalog.Catalog`.
:return: The requested catalog, None if there is no
registered catalog with given name.
"""
catalog = self._j_tenv.getCatalog(catalog_name)
if catalog.isPresent():
return Catalog(catalog.get())
else:
return None
def load_module(self, module_name: str, module: Module):
"""
Loads a :class:`~pyflink.table.Module` under a unique name. Modules will be kept
in the loaded order.
ValidationException is thrown when there is already a module with the same name.
:param module_name: Name of the :class:`~pyflink.table.Module`.
:param module: The module instance.
.. versionadded:: 1.12.0
"""
self._j_tenv.loadModule(module_name, module._j_module)
def unload_module(self, module_name: str):
"""
Unloads a :class:`~pyflink.table.Module` with given name.
ValidationException is thrown when there is no module with the given name.
:param module_name: Name of the :class:`~pyflink.table.Module`.
.. versionadded:: 1.12.0
"""
self._j_tenv.unloadModule(module_name)
def use_modules(self, *module_names: str):
"""
Use an array of :class:`~pyflink.table.Module` with given names.
ValidationException is thrown when there is duplicate name or no module with the given name.
:param module_names: Names of the modules to be used.
.. versionadded:: 1.13.0
"""
j_module_names = to_jarray(get_gateway().jvm.String, module_names)
self._j_tenv.useModules(j_module_names)
def create_java_temporary_system_function(self, name: str, function_class_name: str):
"""
Registers a java user defined function class as a temporary system function.
Compared to .. seealso:: :func:`create_java_temporary_function`, system functions are
identified by a global name that is independent of the current catalog and current
database. Thus, this method allows to extend the set of built-in system functions like
TRIM, ABS, etc.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary system function.
Example:
::
>>> table_env.create_java_temporary_system_function("func",
... "java.user.defined.function.class.name")
:param name: The name under which the function will be registered globally.
:param function_class_name: The java full qualified class name of the function class
containing the implementation. The function must have a
public no-argument constructor and can be founded in current
Java classloader.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
self._j_tenv.createTemporarySystemFunction(name, java_function)
def create_temporary_system_function(self, name: str,
function: Union[UserDefinedFunctionWrapper,
AggregateFunction]):
"""
Registers a python user defined function class as a temporary system function.
Compared to .. seealso:: :func:`create_temporary_function`, system functions are identified
by a global name that is independent of the current catalog and current database. Thus,
this method allows to extend the set of built-in system functions like TRIM, ABS, etc.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary system function.
Example:
::
>>> table_env.create_temporary_system_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.create_temporary_system_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.create_temporary_system_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param name: The name under which the function will be registered globally.
:param function: The function class containing the implementation. The function must have a
public no-argument constructor and can be founded in current Java
classloader.
.. versionadded:: 1.12.0
"""
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
self._j_tenv.createTemporarySystemFunction(name, java_function)
def drop_temporary_system_function(self, name: str) -> bool:
"""
Drops a temporary system function registered under the given name.
If a permanent function with the given name exists, it will be used from now on for any
queries that reference this name.
:param name: The name under which the function has been registered globally.
:return: true if a function existed under the given name and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropTemporarySystemFunction(name)
def create_java_function(self, path: str, function_class_name: str,
ignore_if_exists: bool = None):
"""
Registers a java user defined function class as a catalog function in the given path.
Compared to system functions with a globally defined name, catalog functions are always
(implicitly or explicitly) identified by a catalog and database.
There must not be another function (temporary or permanent) registered under the same path.
Example:
::
>>> table_env.create_java_function("func", "java.user.defined.function.class.name")
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:param function_class_name: The java full qualified class name of the function class
containing the implementation. The function must have a
public no-argument constructor and can be founded in current
Java classloader.
:param ignore_if_exists: If a function exists under the given path and this flag is set,
no operation is executed. An exception is thrown otherwise.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
if ignore_if_exists is None:
self._j_tenv.createFunction(path, java_function)
else:
self._j_tenv.createFunction(path, java_function, ignore_if_exists)
def drop_function(self, path: str) -> bool:
"""
Drops a catalog function registered in the given path.
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:return: true if a function existed in the given path and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropFunction(path)
def create_java_temporary_function(self, path: str, function_class_name: str):
"""
Registers a java user defined function class as a temporary catalog function.
Compared to .. seealso:: :func:`create_java_temporary_system_function` with a globally
defined name, catalog functions are always (implicitly or explicitly) identified by a
catalog and database.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary function.
Example:
::
>>> table_env.create_java_temporary_function("func",
... "java.user.defined.function.class.name")
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:param function_class_name: The java full qualified class name of the function class
containing the implementation. The function must have a
public no-argument constructor and can be founded in current
Java classloader.
.. versionadded:: 1.12.0
"""
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader() \
.loadClass(function_class_name)
self._j_tenv.createTemporaryFunction(path, java_function)
def create_temporary_function(self, path: str, function: Union[UserDefinedFunctionWrapper,
AggregateFunction]):
"""
Registers a python user defined function class as a temporary catalog function.
Compared to .. seealso:: :func:`create_temporary_system_function` with a globally defined
name, catalog functions are always (implicitly or explicitly) identified by a catalog and
database.
Temporary functions can shadow permanent ones. If a permanent function under a given name
exists, it will be inaccessible in the current session. To make the permanent function
available again one can drop the corresponding temporary function.
Example:
::
>>> table_env.create_temporary_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.create_temporary_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.create_temporary_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:param function: The function class containing the implementation. The function must have a
public no-argument constructor and can be founded in current Java
classloader.
.. versionadded:: 1.12.0
"""
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
self._j_tenv.createTemporaryFunction(path, java_function)
def drop_temporary_function(self, path: str) -> bool:
"""
Drops a temporary system function registered under the given name.
If a permanent function with the given name exists, it will be used from now on for any
queries that reference this name.
:param path: The path under which the function will be registered.
See also the :class:`~pyflink.table.TableEnvironment` class description for
the format of the path.
:return: true if a function existed in the given path and was removed.
.. versionadded:: 1.12.0
"""
return self._j_tenv.dropTemporaryFunction(path)
def create_temporary_table(self, path: str, descriptor: TableDescriptor):
"""
Registers the given :class:`~pyflink.table.TableDescriptor` as a temporary catalog table.
The TableDescriptor is converted into a CatalogTable and stored in the catalog.
Temporary objects can shadow permanent ones. If a permanent object in a given path exists,
it will be inaccessible in the current session. To make the permanent object available again
one can drop the corresponding temporary object.
Examples:
::
>>> table_env.create_temporary_table("MyTable", TableDescriptor.for_connector("datagen")
... .schema(Schema.new_builder()
... .column("f0", DataTypes.STRING())
... .build())
... .option("rows-per-second", 10)
... .option("fields.f0.kind", "random")
... .build())
:param path: The path under which the table will be registered.
:param descriptor: Template for creating a CatalogTable instance.
.. versionadded:: 1.14.0
"""
self._j_tenv.createTemporaryTable(path, descriptor._j_table_descriptor)
def create_table(self, path: str, descriptor: TableDescriptor):
"""
Registers the given :class:`~pyflink.table.TableDescriptor` as a catalog table.
The TableDescriptor is converted into a CatalogTable and stored in the catalog.
If the table should not be permanently stored in a catalog, use
:func:`create_temporary_table` instead.
Examples:
::
>>> table_env.create_table("MyTable", TableDescriptor.for_connector("datagen")
... .schema(Schema.new_builder()
... .column("f0", DataTypes.STRING())
... .build())
... .option("rows-per-second", 10)
... .option("fields.f0.kind", "random")
... .build())
:param path: The path under which the table will be registered.
:param descriptor: Template for creating a CatalogTable instance.
.. versionadded:: 1.14.0
"""
self._j_tenv.createTable(path, descriptor._j_table_descriptor)
def register_table(self, name: str, table: Table):
"""
Registers a :class:`~pyflink.table.Table` under a unique name in the TableEnvironment's
catalog. Registered tables can be referenced in SQL queries.
Example:
::
>>> tab = table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
>>> table_env.register_table("source", tab)
:param name: The name under which the table will be registered.
:param table: The table to register.
.. note:: Deprecated in 1.10. Use :func:`create_temporary_view` instead.
"""
warnings.warn("Deprecated in 1.10. Use create_temporary_view instead.", DeprecationWarning)
self._j_tenv.registerTable(name, table._j_table)
def register_table_source(self, name: str, table_source: TableSource):
"""
Registers an external :class:`~pyflink.table.TableSource` in this
:class:`~pyflink.table.TableEnvironment`'s catalog. Registered tables can be referenced in
SQL queries.
Example:
::
>>> table_env.register_table_source("source",
... CsvTableSource("./1.csv",
... ["a", "b"],
... [DataTypes.INT(),
... DataTypes.STRING()]))
:param name: The name under which the table source is registered.
:param table_source: The table source to register.
.. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
"""
warnings.warn("Deprecated in 1.10. Use create_table instead.", DeprecationWarning)
self._j_tenv.registerTableSourceInternal(name, table_source._j_table_source)
def register_table_sink(self, name: str, table_sink: TableSink):
"""
Registers an external :class:`~pyflink.table.TableSink` with given field names and types in
this :class:`~pyflink.table.TableEnvironment`'s catalog. Registered sink tables can be
referenced in SQL DML statements.
Example:
::
>>> table_env.register_table_sink("sink",
... CsvTableSink(["a", "b"],
... [DataTypes.INT(),
... DataTypes.STRING()],
... "./2.csv"))
:param name: The name under which the table sink is registered.
:param table_sink: The table sink to register.
.. note:: Deprecated in 1.10. Use :func:`execute_sql` instead.
"""
warnings.warn("Deprecated in 1.10. Use create_table instead.", DeprecationWarning)
self._j_tenv.registerTableSinkInternal(name, table_sink._j_table_sink)
def scan(self, *table_path: str) -> Table:
"""
Scans a registered table and returns the resulting :class:`~pyflink.table.Table`.
A table to scan must be registered in the TableEnvironment. It can be either directly
registered or be an external member of a :class:`~pyflink.table.catalog.Catalog`.
See the documentation of :func:`~pyflink.table.TableEnvironment.use_database` or
:func:`~pyflink.table.TableEnvironment.use_catalog` for the rules on the path resolution.
Examples:
Scanning a directly registered table
::
>>> tab = table_env.scan("tableName")
Scanning a table from a registered catalog
::
>>> tab = table_env.scan("catalogName", "dbName", "tableName")
:param table_path: The path of the table to scan.
:throws: Exception if no table is found using the given table path.
:return: The resulting table.
.. note:: Deprecated in 1.10. Use :func:`from_path` instead.
"""
warnings.warn("Deprecated in 1.10. Use from_path instead.", DeprecationWarning)
gateway = get_gateway()
j_table_paths = java_utils.to_jarray(gateway.jvm.String, table_path)
j_table = self._j_tenv.scan(j_table_paths)
return Table(j_table, self)
def from_path(self, path: str) -> Table:
"""
Reads a registered table and returns the resulting :class:`~pyflink.table.Table`.
A table to scan must be registered in the :class:`~pyflink.table.TableEnvironment`.
See the documentation of :func:`use_database` or :func:`use_catalog` for the rules on the
path resolution.
Examples:
Reading a table from default catalog and database.
::
>>> tab = table_env.from_path("tableName")
Reading a table from a registered catalog.
::
>>> tab = table_env.from_path("catalogName.dbName.tableName")
Reading a table from a registered catalog with escaping. (`Table` is a reserved keyword).
Dots in e.g. a database name also must be escaped.
::
>>> tab = table_env.from_path("catalogName.`db.Name`.`Table`")
:param path: The path of a table API object to scan.
:return: Either a table or virtual table (=view).
.. seealso:: :func:`use_catalog`
.. seealso:: :func:`use_database`
.. versionadded:: 1.10.0
"""
return Table(get_method(self._j_tenv, "from")(path), self)
def from_descriptor(self, descriptor: TableDescriptor) -> Table:
"""
Returns a Table backed by the given TableDescriptor.
The TableDescriptor is registered as an inline (i.e. anonymous) temporary table
(see :func:`create_temporary_table`) using a unique identifier and then read. Note that
calling this method multiple times, even with the same descriptor, results in multiple
temporary tables. In such cases, it is recommended to register it under a name using
:func:`create_temporary_table` and reference it via :func:`from_path`
Examples:
::
>>> table_env.from_descriptor(TableDescriptor.for_connector("datagen")
... .schema(Schema.new_builder()
... .column("f0", DataTypes.STRING())
... .build())
... .build()
Note that the returned Table is an API object and only contains a pipeline description.
It actually corresponds to a <i>view</i> in SQL terms. Call :func:`execute` in Table to
trigger an execution.
:return: The Table object describing the pipeline for further transformations.
.. versionadded:: 1.14.0
"""
return Table(get_method(self._j_tenv, "from")(descriptor._j_table_descriptor), self)
def list_catalogs(self) -> List[str]:
"""
Gets the names of all catalogs registered in this environment.
:return: List of catalog names.
"""
j_catalog_name_array = self._j_tenv.listCatalogs()
return [item for item in j_catalog_name_array]
def list_modules(self) -> List[str]:
"""
Gets the names of all modules used in this environment.
:return: List of module names.
.. versionadded:: 1.10.0
"""
j_module_name_array = self._j_tenv.listModules()
return [item for item in j_module_name_array]
def list_full_modules(self) -> List[ModuleEntry]:
"""
Gets the names and statuses of all modules loaded in this environment.
:return: List of module names and use statuses.
.. versionadded:: 1.13.0
"""
j_module_entry_array = self._j_tenv.listFullModules()
return [ModuleEntry(entry.name(), entry.used()) for entry in j_module_entry_array]
def list_databases(self) -> List[str]:
"""
Gets the names of all databases in the current catalog.
:return: List of database names in the current catalog.
"""
j_database_name_array = self._j_tenv.listDatabases()
return [item for item in j_database_name_array]
def list_tables(self) -> List[str]:
"""
Gets the names of all tables and views in the current database of the current catalog.
It returns both temporary and permanent tables and views.
:return: List of table and view names in the current database of the current catalog.
"""
j_table_name_array = self._j_tenv.listTables()
return [item for item in j_table_name_array]
def list_views(self) -> List[str]:
"""
Gets the names of all views in the current database of the current catalog.
It returns both temporary and permanent views.
:return: List of view names in the current database of the current catalog.
.. versionadded:: 1.11.0
"""
j_view_name_array = self._j_tenv.listViews()
return [item for item in j_view_name_array]
def list_user_defined_functions(self) -> List[str]:
"""
Gets the names of all user defined functions registered in this environment.
:return: List of the names of all user defined functions registered in this environment.
"""
j_udf_name_array = self._j_tenv.listUserDefinedFunctions()
return [item for item in j_udf_name_array]
def list_functions(self) -> List[str]:
"""
Gets the names of all functions in this environment.
:return: List of the names of all functions in this environment.
.. versionadded:: 1.10.0
"""
j_function_name_array = self._j_tenv.listFunctions()
return [item for item in j_function_name_array]
def list_temporary_tables(self) -> List[str]:
"""
Gets the names of all temporary tables and views available in the current namespace
(the current database of the current catalog).
:return: A list of the names of all registered temporary tables and views in the current
database of the current catalog.
.. seealso:: :func:`list_tables`
.. versionadded:: 1.10.0
"""
j_table_name_array = self._j_tenv.listTemporaryTables()
return [item for item in j_table_name_array]
def list_temporary_views(self) -> List[str]:
"""
Gets the names of all temporary views available in the current namespace (the current
database of the current catalog).
:return: A list of the names of all registered temporary views in the current database
of the current catalog.
.. seealso:: :func:`list_tables`
.. versionadded:: 1.10.0
"""
j_view_name_array = self._j_tenv.listTemporaryViews()
return [item for item in j_view_name_array]
def drop_temporary_table(self, table_path: str) -> bool:
"""
Drops a temporary table registered in the given path.
If a permanent table with a given path exists, it will be used
from now on for any queries that reference this path.
:param table_path: The path of the registered temporary table.
:return: True if a table existed in the given path and was removed.
.. versionadded:: 1.10.0
"""
return self._j_tenv.dropTemporaryTable(table_path)
def drop_temporary_view(self, view_path: str) -> bool:
"""
Drops a temporary view registered in the given path.
If a permanent table or view with a given path exists, it will be used
from now on for any queries that reference this path.
:return: True if a view existed in the given path and was removed.
.. versionadded:: 1.10.0
"""
return self._j_tenv.dropTemporaryView(view_path)
def explain_sql(self, stmt: str, *extra_details: ExplainDetail) -> str:
"""
Returns the AST of the specified statement and the execution plan.
:param stmt: The statement for which the AST and execution plan will be returned.
:param extra_details: The extra explain details which the explain result should include,
e.g. estimated cost, changelog mode for streaming
:return: The statement for which the AST and execution plan will be returned.
.. versionadded:: 1.11.0
"""
JExplainFormat = get_gateway().jvm.org.apache.flink.table.api.ExplainFormat
j_extra_details = to_j_explain_detail_arr(extra_details)
return self._j_tenv.explainSql(stmt, JExplainFormat.TEXT, j_extra_details)
def sql_query(self, query: str) -> Table:
"""
Evaluates a SQL query on registered tables and retrieves the result as a
:class:`~pyflink.table.Table`.
All tables referenced by the query must be registered in the TableEnvironment.
A :class:`~pyflink.table.Table` is automatically registered when its
:func:`~Table.__str__` method is called, for example when it is embedded into a String.
Hence, SQL queries can directly reference a :class:`~pyflink.table.Table` as follows:
::
>>> table = ...
# the table is not registered to the table environment
>>> table_env.sql_query("SELECT * FROM %s" % table)
:param query: The sql query string.
:return: The result table.
"""
j_table = self._j_tenv.sqlQuery(query)
return Table(j_table, self)
def execute_sql(self, stmt: str) -> TableResult:
"""
Execute the given single statement, and return the execution result.
The statement can be DDL/DML/DQL/SHOW/DESCRIBE/EXPLAIN/USE.
For DML and DQL, this method returns TableResult once the job has been submitted.
For DDL and DCL statements, TableResult is returned once the operation has finished.
:return content for DQL/SHOW/DESCRIBE/EXPLAIN,
the affected row count for `DML` (-1 means unknown),
or a string message ("OK") for other statements.
.. versionadded:: 1.11.0
"""
self._before_execute()
return TableResult(self._j_tenv.executeSql(stmt))
def create_statement_set(self) -> StatementSet:
"""
Create a StatementSet instance which accepts DML statements or Tables,
the planner can optimize all added statements and Tables together
and then submit as one job.
:return statement_set instance
.. versionadded:: 1.11.0
"""
_j_statement_set = self._j_tenv.createStatementSet()
return StatementSet(_j_statement_set, self)
def get_current_catalog(self) -> str:
"""
Gets the current default catalog name of the current session.
:return: The current default catalog name that is used for the path resolution.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
"""
return self._j_tenv.getCurrentCatalog()
def use_catalog(self, catalog_name: str):
"""
Sets the current catalog to the given value. It also sets the default
database to the catalog's default one.
See also :func:`~TableEnvironment.use_database`.
This is used during the resolution of object paths. Both the catalog and database are
optional when referencing catalog objects such as tables, views etc. The algorithm looks for
requested objects in following paths in that order:
* ``[current-catalog].[current-database].[requested-path]``
* ``[current-catalog].[requested-path]``
* ``[requested-path]``
Example:
Given structure with default catalog set to ``default_catalog`` and default database set to
``default_database``. ::
root:
|- default_catalog
|- default_database
|- tab1
|- db1
|- tab1
|- cat1
|- db1
|- tab1
The following table describes resolved paths:
+----------------+-----------------------------------------+
| Requested path | Resolved path |
+================+=========================================+
| tab1 | default_catalog.default_database.tab1 |
+----------------+-----------------------------------------+
| db1.tab1 | default_catalog.db1.tab1 |
+----------------+-----------------------------------------+
| cat1.db1.tab1 | cat1.db1.tab1 |
+----------------+-----------------------------------------+
:param catalog_name: The name of the catalog to set as the current default catalog.
:throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if a catalog with given
name could not be set as the default one.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
"""
self._j_tenv.useCatalog(catalog_name)
def get_current_database(self) -> str:
"""
Gets the current default database name of the running session.
:return: The name of the current database of the current catalog.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_database`
"""
return self._j_tenv.getCurrentDatabase()
def use_database(self, database_name: str):
"""
Sets the current default database. It has to exist in the current catalog. That path will
be used as the default one when looking for unqualified object names.
This is used during the resolution of object paths. Both the catalog and database are
optional when referencing catalog objects such as tables, views etc. The algorithm looks for
requested objects in following paths in that order:
* ``[current-catalog].[current-database].[requested-path]``
* ``[current-catalog].[requested-path]``
* ``[requested-path]``
Example:
Given structure with default catalog set to ``default_catalog`` and default database set to
``default_database``. ::
root:
|- default_catalog
|- default_database
|- tab1
|- db1
|- tab1
|- cat1
|- db1
|- tab1
The following table describes resolved paths:
+----------------+-----------------------------------------+
| Requested path | Resolved path |
+================+=========================================+
| tab1 | default_catalog.default_database.tab1 |
+----------------+-----------------------------------------+
| db1.tab1 | default_catalog.db1.tab1 |
+----------------+-----------------------------------------+
| cat1.db1.tab1 | cat1.db1.tab1 |
+----------------+-----------------------------------------+
:throws: :class:`~pyflink.util.exceptions.CatalogException` thrown if the given catalog and
database could not be set as the default ones.
.. seealso:: :func:`~pyflink.table.TableEnvironment.use_catalog`
:param database_name: The name of the database to set as the current database.
"""
self._j_tenv.useDatabase(database_name)
def get_config(self) -> TableConfig:
"""
Returns the table config to define the runtime behavior of the Table API.
:return: Current table config.
"""
if not hasattr(self, "table_config"):
table_config = TableConfig()
table_config._j_table_config = self._j_tenv.getConfig()
setattr(self, "table_config", table_config)
return getattr(self, "table_config")
def register_java_function(self, name: str, function_class_name: str):
"""
Registers a java user defined function under a unique name. Replaces already existing
user-defined functions under this name. The acceptable function type contains
**ScalarFunction**, **TableFunction** and **AggregateFunction**.
Example:
::
>>> table_env.register_java_function("func1", "java.user.defined.function.class.name")
:param name: The name under which the function is registered.
:param function_class_name: The java full qualified class name of the function to register.
The function must have a public no-argument constructor and can
be founded in current Java classloader.
.. note:: Deprecated in 1.12. Use :func:`create_java_temporary_system_function` instead.
"""
warnings.warn("Deprecated in 1.12. Use :func:`create_java_temporary_system_function` "
"instead.", DeprecationWarning)
gateway = get_gateway()
java_function = gateway.jvm.Thread.currentThread().getContextClassLoader()\
.loadClass(function_class_name).newInstance()
# this is a temporary solution and will be unified later when we use the new type
# system(DataType) to replace the old type system(TypeInformation).
if not isinstance(self, StreamTableEnvironment) or self.__class__ == TableEnvironment:
if self._is_table_function(java_function):
self._register_table_function(name, java_function)
elif self._is_aggregate_function(java_function):
self._register_aggregate_function(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
def register_function(self, name: str, function: UserDefinedFunctionWrapper):
"""
Registers a python user-defined function under a unique name. Replaces already existing
user-defined function under this name.
Example:
::
>>> table_env.register_function(
... "add_one", udf(lambda i: i + 1, result_type=DataTypes.BIGINT()))
>>> @udf(result_type=DataTypes.BIGINT())
... def add(i, j):
... return i + j
>>> table_env.register_function("add", add)
>>> class SubtractOne(ScalarFunction):
... def eval(self, i):
... return i - 1
>>> table_env.register_function(
... "subtract_one", udf(SubtractOne(), result_type=DataTypes.BIGINT()))
:param name: The name under which the function is registered.
:param function: The python user-defined function to register.
.. versionadded:: 1.10.0
.. note:: Deprecated in 1.12. Use :func:`create_temporary_system_function` instead.
"""
warnings.warn("Deprecated in 1.12. Use :func:`create_temporary_system_function` "
"instead.", DeprecationWarning)
function = self._wrap_aggregate_function_if_needed(function)
java_function = function._java_user_defined_function()
# this is a temporary solution and will be unified later when we use the new type
# system(DataType) to replace the old type system(TypeInformation).
if self.__class__ == TableEnvironment:
if self._is_table_function(java_function):
self._register_table_function(name, java_function)
elif self._is_aggregate_function(java_function):
self._register_aggregate_function(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
else:
self._j_tenv.registerFunction(name, java_function)
def create_temporary_view(self,
view_path: str,
table_or_data_stream: Union[Table, DataStream],
*fields_or_schema: Union[str, Expression, Schema]):
"""
1. When table_or_data_stream is a :class:`~pyflink.table.Table`:
Registers a :class:`~pyflink.table.Table` API object as a temporary view similar to SQL
temporary views.
Temporary objects can shadow permanent ones. If a permanent object in a given path
exists, it will be inaccessible in the current session. To make the permanent object
available again you can drop the corresponding temporary object.
2. When table_or_data_stream is a :class:`~pyflink.datastream.DataStream`:
2.1 When fields_or_schema is a str or a sequence of :class:`~pyflink.table.Expression`:
Creates a view from the given {@link DataStream} in a given path with specified
field names. Registered views can be referenced in SQL queries.
1. Reference input fields by name: All fields in the schema definition are
referenced by name (and possibly renamed using an alias (as). Moreover, we can
define proctime and rowtime attributes at arbitrary positions using arbitrary names
(except those that exist in the result schema). In this mode, fields can be
reordered and projected out. This mode can be used for any input type, including
POJOs.
Example:
::
>>> stream = ...
# reorder the fields, rename the original 'f0' field to 'name' and add
# event-time attribute named 'rowtime'
# use str
>>> table_env.create_temporary_view(
... "cat.db.myTable",
... stream,
... "f1, rowtime.rowtime, f0 as 'name'")
# or use a sequence of expression
>>> table_env.create_temporary_view(
... "cat.db.myTable",
... stream,
... col("f1"),
... col("rowtime").rowtime,
... col("f0").alias('name'))
2. Reference input fields by position: In this mode, fields are simply renamed.
Event-time attributes can replace the field on their position in the input data
(if it is of correct type) or be appended at the end. Proctime attributes must be
appended at the end. This mode can only be used if the input type has a defined
field order (tuple, case class, Row) and none of the {@code fields} references a
field of the input type.
Example:
::
>>> stream = ...
# rename the original fields to 'a' and 'b' and extract the internally attached
# timestamp into an event-time attribute named 'rowtime'
# use str
>>> table_env.create_temporary_view(
... "cat.db.myTable", stream, "a, b, rowtime.rowtime")
# or use a sequence of expressions
>>> table_env.create_temporary_view(
... "cat.db.myTable",
... stream,
... col("a"),
... col("b"),
... col("rowtime").rowtime)
Temporary objects can shadow permanent ones. If a permanent object in a given path
exists, it will be inaccessible in the current session. To make the permanent object
available again you can drop the corresponding temporary object.
2.2 When fields_or_schema is a :class:`~pyflink.table.Schema`:
Creates a view from the given {@link DataStream} in a given path. Registered views
can be referenced in SQL queries.
See :func:`from_data_stream` for more information on how a
:class:`~pyflink.datastream.DataStream` is translated into a table.
Temporary objects can shadow permanent ones. If a permanent object in a given path
exists, it will be inaccessible in the current session. To make the permanent object
available again you can drop the corresponding temporary object.
.. note:: create_temporary_view by providing a Schema (case 2.) was added from flink
1.14.0.
:param view_path: The path under which the view will be registered. See also the
:class:`~pyflink.table.TableEnvironment` class description for the format
of the path.
:param table_or_data_stream: The Table or DataStream out of which to create the view.
:param fields_or_schema: The fields expressions(str) to map original fields of the
DataStream to the fields of the View or the customized schema for the final
table.
.. versionadded:: 1.10.0
"""
if isinstance(table_or_data_stream, Table):
self._j_tenv.createTemporaryView(view_path, table_or_data_stream._j_table)
else:
j_data_stream = table_or_data_stream._j_data_stream
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(j_data_stream.getExecutionEnvironment())
if len(fields_or_schema) == 0:
self._j_tenv.createTemporaryView(view_path, j_data_stream)
elif len(fields_or_schema) == 1 and isinstance(fields_or_schema[0], str):
self._j_tenv.createTemporaryView(
view_path,
j_data_stream,
fields_or_schema[0])
elif len(fields_or_schema) == 1 and isinstance(fields_or_schema[0], Schema):
self._j_tenv.createTemporaryView(
view_path,
j_data_stream,
fields_or_schema[0]._j_schema)
elif (len(fields_or_schema) > 0 and
all(isinstance(elem, Expression) for elem in fields_or_schema)):
self._j_tenv.createTemporaryView(
view_path,
j_data_stream,
to_expression_jarray(fields_or_schema))
else:
raise ValueError("Invalid arguments for 'fields': %r" %
','.join([repr(item) for item in fields_or_schema]))
def add_python_file(self, file_path: str):
"""
Adds a python dependency which could be python files, python packages or
local directories. They will be added to the PYTHONPATH of the python UDF worker.
Please make sure that these dependencies can be imported.
:param file_path: The path of the python dependency.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
python_files = self.get_config().get(jvm.PythonOptions.PYTHON_FILES.key(), None)
if python_files is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join([file_path, python_files])
else:
python_files = file_path
self.get_config().set(jvm.PythonOptions.PYTHON_FILES.key(), python_files)
def set_python_requirements(self,
requirements_file_path: str,
requirements_cache_dir: str = None):
"""
Specifies a requirements.txt file which defines the third-party dependencies.
These dependencies will be installed to a temporary directory and added to the
PYTHONPATH of the python UDF worker.
For the dependencies which could not be accessed in the cluster, a directory which contains
the installation packages of these dependencies could be specified using the parameter
"requirements_cached_dir". It will be uploaded to the cluster to support offline
installation.
Example:
::
# commands executed in shell
$ echo numpy==1.16.5 > requirements.txt
$ pip download -d cached_dir -r requirements.txt --no-binary :all:
# python code
>>> table_env.set_python_requirements("requirements.txt", "cached_dir")
.. note::
Please make sure the installation packages matches the platform of the cluster
and the python version used. These packages will be installed using pip,
so also make sure the version of Pip (version >= 20.3) and the version of
SetupTools (version >= 37.0.0).
:param requirements_file_path: The path of "requirements.txt" file.
:param requirements_cache_dir: The path of the local directory which contains the
installation packages.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
python_requirements = requirements_file_path
if requirements_cache_dir is not None:
python_requirements = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[python_requirements, requirements_cache_dir])
self.get_config().set(
jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), python_requirements)
def add_python_archive(self, archive_path: str, target_dir: str = None):
"""
Adds a python archive file. The file will be extracted to the working directory of
python UDF worker.
If the parameter "target_dir" is specified, the archive file will be extracted to a
directory named ${target_dir}. Otherwise, the archive file will be extracted to a
directory with the same name of the archive file.
If python UDF depends on a specific python version which does not exist in the cluster,
this method can be used to upload the virtual environment.
Note that the path of the python interpreter contained in the uploaded environment
should be specified via the method :func:`pyflink.table.TableConfig.set_python_executable`.
The files uploaded via this method are also accessible in UDFs via relative path.
Example:
::
# command executed in shell
# assert the relative path of python interpreter is py_env/bin/python
$ zip -r py_env.zip py_env
# python code
>>> table_env.add_python_archive("py_env.zip")
>>> table_env.get_config().set_python_executable("py_env.zip/py_env/bin/python")
# or
>>> table_env.add_python_archive("py_env.zip", "myenv")
>>> table_env.get_config().set_python_executable("myenv/py_env/bin/python")
# the files contained in the archive file can be accessed in UDF
>>> def my_udf():
... with open("myenv/py_env/data/data.txt") as f:
... ...
.. note::
Please make sure the uploaded python environment matches the platform that the cluster
is running on and that the python version must be 3.5 or higher.
.. note::
Currently only zip-format is supported. i.e. zip, jar, whl, egg, etc.
The other archive formats such as tar, tar.gz, 7z, rar, etc are not supported.
:param archive_path: The archive file path.
:param target_dir: Optional, the target dir name that the archive file extracted to.
.. versionadded:: 1.10.0
"""
jvm = get_gateway().jvm
if target_dir is not None:
archive_path = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[archive_path, target_dir])
python_archives = self.get_config().get(jvm.PythonOptions.PYTHON_ARCHIVES.key(), None)
if python_archives is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join(
[python_archives, archive_path])
else:
python_files = archive_path
self.get_config().set(jvm.PythonOptions.PYTHON_ARCHIVES.key(), python_files)
def from_elements(self, elements: Iterable, schema: Union[DataType, List[str]] = None,
verify_schema: bool = True) -> Table:
"""
Creates a table from a collection of elements.
The elements types must be acceptable atomic types or acceptable composite types.
All elements must be of the same type.
If the elements types are composite types, the composite types must be strictly equal,
and its subtypes must also be acceptable types.
e.g. if the elements are tuples, the length of the tuples must be equal, the element types
of the tuples must be equal in order.
The built-in acceptable atomic element types contains:
**int**, **long**, **str**, **unicode**, **bool**,
**float**, **bytearray**, **datetime.date**, **datetime.time**, **datetime.datetime**,
**datetime.timedelta**, **decimal.Decimal**
The built-in acceptable composite element types contains:
**list**, **tuple**, **dict**, **array**, :class:`~pyflink.table.Row`
If the element type is a composite type, it will be unboxed.
e.g. table_env.from_elements([(1, 'Hi'), (2, 'Hello')]) will return a table like:
+----+-------+
| _1 | _2 |
+====+=======+
| 1 | Hi |
+----+-------+
| 2 | Hello |
+----+-------+
"_1" and "_2" are generated field names.
Example:
::
# use the second parameter to specify custom field names
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')], ['a', 'b'])
# use the second parameter to specify custom table schema
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING())]))
# use the third parameter to switch whether to verify the elements against the schema
>>> table_env.from_elements([(1, 'Hi'), (2, 'Hello')],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING())]),
... False)
# create Table from expressions
>>> table_env.from_elements([row(1, 'abc', 2.0), row(2, 'def', 3.0)],
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.INT()),
... DataTypes.FIELD("b", DataTypes.STRING()),
... DataTypes.FIELD("c", DataTypes.FLOAT())]))
:param elements: The elements to create a table from.
:param schema: The schema of the table.
:param verify_schema: Whether to verify the elements against the schema.
:return: The result table.
"""
# verifies the elements against the specified schema
if isinstance(schema, RowType):
verify_func = _create_type_verifier(schema) if verify_schema else lambda _: True
def verify_obj(obj):
verify_func(obj)
return obj
elif isinstance(schema, DataType):
data_type = schema
schema = RowType().add("value", schema)
verify_func = _create_type_verifier(
data_type, name="field value") if verify_schema else lambda _: True
def verify_obj(obj):
verify_func(obj)
return obj
else:
def verify_obj(obj):
return obj
# infers the schema if not specified
if schema is None or isinstance(schema, (list, tuple)):
schema = _infer_schema_from_data(elements, names=schema)
converter = _create_converter(schema)
elements = map(converter, elements)
elif not isinstance(schema, RowType):
raise TypeError(
"schema should be RowType, list, tuple or None, but got: %s" % schema)
elements = list(elements)
# in case all the elements are expressions
if len(elements) > 0 and all(isinstance(elem, Expression) for elem in elements):
if schema is None:
return Table(self._j_tenv.fromValues(to_expression_jarray(elements)), self)
else:
return Table(self._j_tenv.fromValues(_to_java_data_type(schema),
to_expression_jarray(elements)),
self)
elif any(isinstance(elem, Expression) for elem in elements):
raise ValueError("It doesn't support part of the elements are Expression, while the "
"others are not.")
# verifies the elements against the specified schema
elements = map(verify_obj, elements)
# converts python data to sql data
elements = [schema.to_sql_type(element) for element in elements]
return self._from_elements(elements, schema)
def _from_elements(self, elements: List, schema: DataType) -> Table:
"""
Creates a table from a collection of elements.
:param elements: The elements to create a table from.
:return: The result :class:`~pyflink.table.Table`.
"""
# serializes to a file, and we read the file in java
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
serializer = BatchedSerializer(self._serializer)
try:
with temp_file:
serializer.serialize(elements, temp_file)
j_schema = _to_java_data_type(schema)
gateway = get_gateway()
PythonTableUtils = gateway.jvm \
.org.apache.flink.table.utils.python.PythonTableUtils
j_table = PythonTableUtils.createTableFromElement(
self._j_tenv, temp_file.name, j_schema, True)
return Table(j_table, self)
finally:
atexit.register(lambda: os.unlink(temp_file.name))
def from_pandas(self, pdf,
schema: Union[RowType, List[str], Tuple[str], List[DataType],
Tuple[DataType]] = None,
splits_num: int = 1) -> Table:
"""
Creates a table from a pandas DataFrame.
Example:
::
>>> pdf = pd.DataFrame(np.random.rand(1000, 2))
# use the second parameter to specify custom field names
>>> table_env.from_pandas(pdf, ["a", "b"])
# use the second parameter to specify custom field types
>>> table_env.from_pandas(pdf, [DataTypes.DOUBLE(), DataTypes.DOUBLE()]))
# use the second parameter to specify custom table schema
>>> table_env.from_pandas(pdf,
... DataTypes.ROW([DataTypes.FIELD("a", DataTypes.DOUBLE()),
... DataTypes.FIELD("b", DataTypes.DOUBLE())]))
:param pdf: The pandas DataFrame.
:param schema: The schema of the converted table.
:param splits_num: The number of splits the given Pandas DataFrame will be split into. It
determines the number of parallel source tasks.
If not specified, the default parallelism will be used.
:return: The result table.
.. versionadded:: 1.11.0
"""
import pandas as pd
if not isinstance(pdf, pd.DataFrame):
raise TypeError("Unsupported type, expected pandas.DataFrame, got %s" % type(pdf))
import pyarrow as pa
arrow_schema = pa.Schema.from_pandas(pdf, preserve_index=False)
if schema is not None:
if isinstance(schema, RowType):
result_type = schema
elif isinstance(schema, (list, tuple)) and isinstance(schema[0], str):
result_type = RowType(
[RowField(field_name, from_arrow_type(field.type, field.nullable))
for field_name, field in zip(schema, arrow_schema)])
elif isinstance(schema, (list, tuple)) and isinstance(schema[0], DataType):
result_type = RowType(
[RowField(field_name, field_type) for field_name, field_type in zip(
arrow_schema.names, schema)])
else:
raise TypeError("Unsupported schema type, it could only be of RowType, a "
"list of str or a list of DataType, got %s" % schema)
else:
result_type = RowType([RowField(field.name, from_arrow_type(field.type, field.nullable))
for field in arrow_schema])
# serializes to a file, and we read the file in java
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
import pytz
serializer = ArrowSerializer(
create_arrow_schema(result_type.field_names(), result_type.field_types()),
result_type,
pytz.timezone(self.get_config().get_local_timezone()))
step = -(-len(pdf) // splits_num)
pdf_slices = [pdf.iloc[start:start + step] for start in range(0, len(pdf), step)]
data = [[c for (_, c) in pdf_slice.iteritems()] for pdf_slice in pdf_slices]
try:
with temp_file:
serializer.serialize(data, temp_file)
jvm = get_gateway().jvm
data_type = _to_java_data_type(result_type).notNull()
data_type = data_type.bridgedTo(
load_java_class('org.apache.flink.table.data.RowData'))
j_arrow_table_source = \
jvm.org.apache.flink.table.runtime.arrow.ArrowUtils.createArrowTableSource(
data_type, temp_file.name)
return Table(self._j_tenv.fromTableSource(j_arrow_table_source), self)
finally:
os.unlink(temp_file.name)
def _set_python_executable_for_local_executor(self):
jvm = get_gateway().jvm
j_config = get_j_env_configuration(self._get_j_env())
if not j_config.containsKey(jvm.PythonOptions.PYTHON_EXECUTABLE.key()) \
and is_local_deployment(j_config):
j_config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), sys.executable)
def _add_jars_to_j_env_config(self, config_key):
jvm = get_gateway().jvm
jar_urls = self.get_config().get(config_key, None)
if jar_urls is not None:
# normalize
jar_urls_list = []
for url in jar_urls.split(";"):
url = url.strip()
if url != "":
jar_urls_list.append(jvm.java.net.URL(url).toString())
j_configuration = get_j_env_configuration(self._get_j_env())
if j_configuration.containsKey(config_key):
for url in j_configuration.getString(config_key, "").split(";"):
url = url.strip()
if url != "" and url not in jar_urls_list:
jar_urls_list.append(url)
j_configuration.setString(config_key, ";".join(jar_urls_list))
def _get_j_env(self):
return self._j_tenv.getPlanner().getExecEnv()
@staticmethod
def _is_table_function(java_function):
java_function_class = java_function.getClass()
j_table_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.TableFunction)
return j_table_function_class.isAssignableFrom(java_function_class)
@staticmethod
def _is_aggregate_function(java_function):
java_function_class = java_function.getClass()
j_aggregate_function_class = get_java_class(
get_gateway().jvm.org.apache.flink.table.functions.ImperativeAggregateFunction)
return j_aggregate_function_class.isAssignableFrom(java_function_class)
def _register_table_function(self, name, table_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfTableFunction(table_function)
function_catalog.registerTempSystemTableFunction(name, table_function, result_type)
def _register_aggregate_function(self, name, aggregate_function):
function_catalog = self._get_function_catalog()
gateway = get_gateway()
helper = gateway.jvm.org.apache.flink.table.functions.UserDefinedFunctionHelper
result_type = helper.getReturnTypeOfAggregateFunction(aggregate_function)
acc_type = helper.getAccumulatorTypeOfAggregateFunction(aggregate_function)
function_catalog.registerTempSystemAggregateFunction(
name, aggregate_function, result_type, acc_type)
def _get_function_catalog(self):
function_catalog_field = self._j_tenv.getClass().getDeclaredField("functionCatalog")
function_catalog_field.setAccessible(True)
function_catalog = function_catalog_field.get(self._j_tenv)
return function_catalog
def _before_execute(self):
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
self._add_jars_to_j_env_config(jars_key)
self._add_jars_to_j_env_config(classpaths_key)
def _wrap_aggregate_function_if_needed(self, function) -> UserDefinedFunctionWrapper:
if isinstance(function, AggregateFunction):
function = udaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
elif isinstance(function, TableAggregateFunction):
function = udtaf(function,
result_type=function.get_result_type(),
accumulator_type=function.get_accumulator_type(),
name=str(function.__class__.__name__))
return function
def _config_chaining_optimization(self):
JChainingOptimizingExecutor = get_gateway().jvm.org.apache.flink.table.executor.python.\
ChainingOptimizingExecutor
exec_env_field = get_field(self._j_tenv.getClass(), "execEnv")
exec_env_field.set(self._j_tenv,
JChainingOptimizingExecutor(exec_env_field.get(self._j_tenv)))
def _open(self):
# start BeamFnLoopbackWorkerPoolServicer when executed in MiniCluster
def startup_loopback_server():
from pyflink.fn_execution.beam.beam_worker_pool_service import \
BeamFnLoopbackWorkerPoolServicer
self.get_config().set("python.loopback-server.address",
BeamFnLoopbackWorkerPoolServicer().start())
python_worker_execution_mode = os.environ.get('_python_worker_execution_mode')
if python_worker_execution_mode is None:
if is_local_deployment(get_j_env_configuration(self._get_j_env())):
startup_loopback_server()
elif python_worker_execution_mode == 'loopback':
if is_local_deployment(get_j_env_configuration(self._get_j_env())):
startup_loopback_server()
else:
raise ValueError("Loopback mode is enabled, however the job wasn't configured to "
"run in local deployment mode")
elif python_worker_execution_mode != 'process':
raise ValueError(
"It only supports to execute the Python worker in 'loopback' mode and 'process' "
"mode, unknown mode '%s' is configured" % python_worker_execution_mode)
class StreamTableEnvironment(TableEnvironment):
def __init__(self, j_tenv):
super(StreamTableEnvironment, self).__init__(j_tenv)
@staticmethod
def create(stream_execution_environment: StreamExecutionEnvironment = None, # type: ignore
environment_settings: EnvironmentSettings = None) -> 'StreamTableEnvironment':
"""
Creates a :class:`~pyflink.table.StreamTableEnvironment`.
Example:
::
# create with StreamExecutionEnvironment.
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> table_env = StreamTableEnvironment.create(env)
# create with StreamExecutionEnvironment and EnvironmentSettings.
>>> configuration = Configuration()
>>> configuration.set_string('execution.buffer-timeout', '1 min')
>>> environment_settings = EnvironmentSettings \\
... .new_instance() \\
... .in_streaming_mode() \\
... .with_configuration(configuration) \\
... .build()
>>> table_env = StreamTableEnvironment.create(
... env, environment_settings=environment_settings)
# create with EnvironmentSettings.
>>> table_env = StreamTableEnvironment.create(environment_settings=environment_settings)
:param stream_execution_environment: The
:class:`~pyflink.datastream.StreamExecutionEnvironment`
of the TableEnvironment.
:param environment_settings: The environment settings used to instantiate the
TableEnvironment.
:return: The StreamTableEnvironment created from given StreamExecutionEnvironment and
configuration.
"""
if stream_execution_environment is None and \
environment_settings is None:
raise ValueError("No argument found, the param 'stream_execution_environment' "
"or 'environment_settings' is required.")
gateway = get_gateway()
if environment_settings is not None:
if stream_execution_environment is None:
j_tenv = gateway.jvm.TableEnvironment.create(
environment_settings._j_environment_settings)
else:
j_tenv = gateway.jvm.StreamTableEnvironment.create(
stream_execution_environment._j_stream_execution_environment,
environment_settings._j_environment_settings)
else:
j_tenv = gateway.jvm.StreamTableEnvironment.create(
stream_execution_environment._j_stream_execution_environment)
return StreamTableEnvironment(j_tenv)
def from_data_stream(self,
data_stream: DataStream,
*fields_or_schema: Union[Expression, Schema]) -> Table:
"""
1. When fields_or_schema is a sequence of Expression:
Converts the given DataStream into a Table with specified field names.
There are two modes for mapping original fields to the fields of the Table:
1. Reference input fields by name:
All fields in the schema definition are referenced by name (and possibly renamed
using and alias (as). Moreover, we can define proctime and rowtime attributes at
arbitrary positions using arbitrary names (except those that exist in the result
schema). In this mode, fields can be reordered and projected out. This mode can be
used for any input type.
2. Reference input fields by position:
In this mode, fields are simply renamed. Event-time attributes can replace the field
on their position in the input data (if it is of correct type) or be appended at the
end. Proctime attributes must be appended at the end. This mode can only be used if
the input type has a defined field order (tuple, case class, Row) and none of the
fields references a field of the input type.
2. When fields_or_schema is a Schema:
Converts the given DataStream into a Table.
Column names and types of the Table are automatically derived from the TypeInformation
of the DataStream. If the outermost record's TypeInformation is a CompositeType, it will
be flattened in the first level. Composite nested fields will not be accessible.
Since the DataStream API does not support changelog processing natively, this method
assumes append-only/insert-only semantics during the stream-to-table conversion. Records
of class Row must describe RowKind.INSERT changes.
By default, the stream record's timestamp and watermarks are not propagated unless
explicitly declared.
This method allows to declare a Schema for the resulting table. The declaration is
similar to a {@code CREATE TABLE} DDL in SQL and allows to:
1. enrich or overwrite automatically derived columns with a custom DataType
2. reorder columns
3. add computed or metadata columns next to the physical columns
4. access a stream record's timestamp
5. declare a watermark strategy or propagate the DataStream watermarks
It is possible to declare a schema without physical/regular columns. In this case, those
columns will be automatically derived and implicitly put at the beginning of the schema
declaration.
The following examples illustrate common schema declarations and their semantics:
Example:
::
=== EXAMPLE 1 ===
no physical columns defined, they will be derived automatically,
e.g. BigDecimal becomes DECIMAL(38, 18)
>>> Schema.new_builder() \
... .column_by_expression("c1", "f1 + 42") \
... .column_by_expression("c2", "f1 - 1") \
... .build()
equal to: CREATE TABLE (f0 STRING, f1 DECIMAL(38, 18), c1 AS f1 + 42, c2 AS f1 - 1)
=== EXAMPLE 2 ===
physical columns defined, input fields and columns will be mapped by name,
columns are reordered and their data type overwritten,
all columns must be defined to show up in the final table's schema
>>> Schema.new_builder() \
... .column("f1", "DECIMAL(10, 2)") \
... .column_by_expression("c", "f1 - 1") \
... .column("f0", "STRING") \
... .build()
equal to: CREATE TABLE (f1 DECIMAL(10, 2), c AS f1 - 1, f0 STRING)
=== EXAMPLE 3 ===
timestamp and watermarks can be added from the DataStream API,
physical columns will be derived automatically
>>> Schema.new_builder() \
... .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)") \
... .watermark("rowtime", "SOURCE_WATERMARK()") \
... .build()
equal to:
CREATE TABLE (
f0 STRING,
f1 DECIMAL(38, 18),
rowtime TIMESTAMP(3) METADATA,
WATERMARK FOR rowtime AS SOURCE_WATERMARK()
)
.. note:: create_temporary_view by providing a Schema (case 2.) was added from flink
1.14.0.
:param data_stream: The datastream to be converted.
:param fields_or_schema: The fields expressions to map original fields of the DataStream to
the fields of the Table or the customized schema for the final table.
:return: The converted Table.
.. versionadded:: 1.12.0
"""
j_data_stream = data_stream._j_data_stream
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(j_data_stream.getExecutionEnvironment())
if len(fields_or_schema) == 0:
return Table(j_table=self._j_tenv.fromDataStream(j_data_stream), t_env=self)
elif all(isinstance(f, Expression) for f in fields_or_schema):
return Table(j_table=self._j_tenv.fromDataStream(
j_data_stream, to_expression_jarray(fields_or_schema)), t_env=self)
elif len(fields_or_schema) == 1 and isinstance(fields_or_schema[0], Schema):
return Table(j_table=self._j_tenv.fromDataStream(
j_data_stream, fields_or_schema[0]._j_schema), t_env=self)
raise ValueError("Invalid arguments for 'fields': %r" % fields_or_schema)
def from_changelog_stream(self,
data_stream: DataStream,
schema: Schema = None,
changelog_mode: ChangelogMode = None) -> Table:
"""
Converts the given DataStream of changelog entries into a Table.
Compared to :func:`from_data_stream`, this method consumes instances of Row and evaluates
the RowKind flag that is contained in every record during runtime. The runtime behavior is
similar to that of a DynamicTableSource.
If you don't specify the changelog_mode, the changelog containing all kinds of changes
(enumerated in RowKind) as the default ChangelogMode.
Column names and types of the Table are automatically derived from the TypeInformation of
the DataStream. If the outermost record's TypeInformation is a CompositeType, it will be
flattened in the first level. Composite nested fields will not be accessible.
By default, the stream record's timestamp and watermarks are not propagated unless
explicitly declared.
This method allows to declare a Schema for the resulting table. The declaration is similar
to a {@code CREATE TABLE} DDL in SQL and allows to:
1. enrich or overwrite automatically derived columns with a custom DataType
2. reorder columns
3. add computed or metadata columns next to the physical columns
4. access a stream record's timestamp
5. declare a watermark strategy or propagate the DataStream watermarks
6. declare a primary key
See :func:`from_data_stream` for more information and examples of how to declare a Schema.
:param data_stream: The changelog stream of Row.
:param schema: The customized schema for the final table.
:param changelog_mode: The expected kinds of changes in the incoming changelog.
:return: The converted Table.
"""
j_data_stream = data_stream._j_data_stream
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(j_data_stream.getExecutionEnvironment())
if schema is None:
return Table(self._j_tenv.fromChangelogStream(j_data_stream), t_env=self)
elif changelog_mode is None:
return Table(
self._j_tenv.fromChangelogStream(j_data_stream, schema._j_schema), t_env=self)
else:
return Table(
self._j_tenv.fromChangelogStream(
j_data_stream,
schema._j_schema,
changelog_mode._j_changelog_mode),
t_env=self)
def to_data_stream(self, table: Table) -> DataStream:
"""
Converts the given Table into a DataStream.
Since the DataStream API does not support changelog processing natively, this method
assumes append-only/insert-only semantics during the table-to-stream conversion. The records
of class Row will always describe RowKind#INSERT changes. Updating tables are
not supported by this method and will produce an exception.
Note that the type system of the table ecosystem is richer than the one of the DataStream
API. The table runtime will make sure to properly serialize the output records to the first
operator of the DataStream API. Afterwards, the Types semantics of the DataStream API
need to be considered.
If the input table contains a single rowtime column, it will be propagated into a stream
record's timestamp. Watermarks will be propagated as well.
:param table: The Table to convert.
:return: The converted DataStream.
"""
return DataStream(self._j_tenv.toDataStream(table._j_table))
def to_changelog_stream(self,
table: Table,
target_schema: Schema = None,
changelog_mode: ChangelogMode = None) -> DataStream:
"""
Converts the given Table into a DataStream of changelog entries.
Compared to :func:`to_data_stream`, this method produces instances of Row and sets the
RowKind flag that is contained in every record during runtime. The runtime behavior is
similar to that of a DynamicTableSink.
If you don't specify the changelog_mode, the changelog containing all kinds of changes
(enumerated in RowKind) as the default ChangelogMode.
The given Schema is used to configure the table runtime to convert columns and internal data
structures to the desired representation. The following example shows how to
convert a table column into a Row type.
Example:
::
>>> table_env.to_changelog_stream(
... table,
... Schema.new_builder() \
... .column("id", DataTypes.BIGINT())
... .column("payload", DataTypes.ROW(
... [DataTypes.FIELD("name", DataTypes.STRING()),
... DataTypes.FIELD("age", DataTypes.INT())]))
... .build())
Note that the type system of the table ecosystem is richer than the one of the DataStream
API. The table runtime will make sure to properly serialize the output records to the first
operator of the DataStream API. Afterwards, the Types semantics of the DataStream API need
to be considered.
If the input table contains a single rowtime column, it will be propagated into a stream
record's timestamp. Watermarks will be propagated as well.
If the rowtime should not be a concrete field in the final Row anymore, or the schema should
be symmetrical for both :func:`from_changelog_stream` and :func:`to_changelog_stream`, the
rowtime can also be declared as a metadata column that will be propagated into a stream
record's timestamp. It is possible to declare a schema without physical/regular columns.
In this case, those columns will be automatically derived and implicitly put at the
beginning of the schema declaration.
The following examples illustrate common schema declarations and their semantics:
Example:
::
given a Table of (id INT, name STRING, my_rowtime TIMESTAMP_LTZ(3))
=== EXAMPLE 1 ===
no physical columns defined, they will be derived automatically,
the last derived physical column will be skipped in favor of the metadata column
>>> Schema.new_builder() \
... .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)") \
... .build()
equal to: CREATE TABLE (id INT, name STRING, rowtime TIMESTAMP_LTZ(3) METADATA)
=== EXAMPLE 2 ===
physical columns defined, all columns must be defined
>>> Schema.new_builder() \
... .column("id", "INT") \
... .column("name", "STRING") \
... .column_by_metadata("rowtime", "TIMESTAMP_LTZ(3)") \
... .build()
equal to: CREATE TABLE (id INT, name STRING, rowtime TIMESTAMP_LTZ(3) METADATA)
:param table: The Table to convert. It can be updating or insert-only.
:param target_schema: The Schema that decides about the final external representation in
DataStream records.
:param changelog_mode: The required kinds of changes in the result changelog. An exception
will be thrown if the given updating table cannot be represented in this changelog mode.
:return: The converted changelog stream of Row.
"""
if target_schema is None:
return DataStream(self._j_tenv.toChangelogStream(table._j_table))
elif changelog_mode is None:
return DataStream(
self._j_tenv.toChangelogStream(table._j_table, target_schema._j_schema))
else:
return DataStream(
self._j_tenv.toChangelogStream(
table._j_table,
target_schema._j_schema,
changelog_mode._j_changelog_mode))
def to_append_stream(self, table: Table, type_info: TypeInformation) -> DataStream:
"""
Converts the given Table into a DataStream of a specified type. The Table must only have
insert (append) changes. If the Table is also modified by update or delete changes, the
conversion will fail.
The fields of the Table are mapped to DataStream as follows: Row and Tuple types: Fields are
mapped by position, field types must match.
:param table: The Table to convert.
:param type_info: The TypeInformation that specifies the type of the DataStream.
:return: The converted DataStream.
.. versionadded:: 1.12.0
"""
j_data_stream = self._j_tenv.toAppendStream(table._j_table, type_info.get_java_type_info())
return DataStream(j_data_stream=j_data_stream)
def to_retract_stream(self, table: Table, type_info: TypeInformation) -> DataStream:
"""
Converts the given Table into a DataStream of add and retract messages. The message will be
encoded as Tuple. The first field is a boolean flag, the second field holds the record of
the specified type.
A true flag indicates an add message, a false flag indicates a retract message.
The fields of the Table are mapped to DataStream as follows: Row and Tuple types: Fields are
mapped by position, field types must match.
:param table: The Table to convert.
:param type_info: The TypeInformation of the requested record type.
:return: The converted DataStream.
.. versionadded:: 1.12.0
"""
j_data_stream = self._j_tenv.toRetractStream(table._j_table, type_info.get_java_type_info())
return DataStream(j_data_stream=j_data_stream)
| 93,413 | 44.746327 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/table/environment_settings.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import warnings
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import create_url_class_loader
from pyflink.common import Configuration
__all__ = ['EnvironmentSettings']
class EnvironmentSettings(object):
"""
Defines all parameters that initialize a table environment. Those parameters are used only
during instantiation of a :class:`~pyflink.table.TableEnvironment` and cannot be changed
afterwards.
Example:
::
>>> EnvironmentSettings.new_instance() \\
... .in_streaming_mode() \\
... .with_built_in_catalog_name("my_catalog") \\
... .with_built_in_database_name("my_database") \\
... .build()
:func:`~EnvironmentSettings.in_streaming_mode` or :func:`~EnvironmentSettings.in_batch_mode`
might be convenient as shortcuts.
"""
class Builder(object):
"""
A builder for :class:`~EnvironmentSettings`.
"""
def __init__(self):
gateway = get_gateway()
self._j_builder = gateway.jvm.EnvironmentSettings.Builder()
def with_configuration(self, config: Configuration) -> 'EnvironmentSettings.Builder':
"""
Creates the EnvironmentSetting with specified Configuration.
:return: EnvironmentSettings.
"""
self._j_builder = self._j_builder.withConfiguration(config._j_configuration)
return self
def in_batch_mode(self) -> 'EnvironmentSettings.Builder':
"""
Sets that the components should work in a batch mode. Streaming mode by default.
:return: This object.
"""
self._j_builder = self._j_builder.inBatchMode()
return self
def in_streaming_mode(self) -> 'EnvironmentSettings.Builder':
"""
Sets that the components should work in a streaming mode. Enabled by default.
:return: This object.
"""
self._j_builder = self._j_builder.inStreamingMode()
return self
def with_built_in_catalog_name(self, built_in_catalog_name: str) \
-> 'EnvironmentSettings.Builder':
"""
Specifies the name of the initial catalog to be created when instantiating
a :class:`~pyflink.table.TableEnvironment`.
This catalog is an in-memory catalog that will be used to store all temporary objects
(e.g. from :func:`~pyflink.table.TableEnvironment.create_temporary_view` or
:func:`~pyflink.table.TableEnvironment.create_temporary_system_function`) that cannot
be persisted because they have no serializable representation.
It will also be the initial value for the current catalog which can be altered via
:func:`~pyflink.table.TableEnvironment.use_catalog`.
Default: "default_catalog".
:param built_in_catalog_name: The specified built-in catalog name.
:return: This object.
"""
self._j_builder = self._j_builder.withBuiltInCatalogName(built_in_catalog_name)
return self
def with_built_in_database_name(self, built_in_database_name: str) \
-> 'EnvironmentSettings.Builder':
"""
Specifies the name of the default database in the initial catalog to be
created when instantiating a :class:`~pyflink.table.TableEnvironment`.
This database is an in-memory database that will be used to store all temporary
objects (e.g. from :func:`~pyflink.table.TableEnvironment.create_temporary_view` or
:func:`~pyflink.table.TableEnvironment.create_temporary_system_function`) that cannot
be persisted because they have no serializable representation.
It will also be the initial value for the current catalog which can be altered via
:func:`~pyflink.table.TableEnvironment.use_catalog`.
Default: "default_database".
:param built_in_database_name: The specified built-in database name.
:return: This object.
"""
self._j_builder = self._j_builder.withBuiltInDatabaseName(built_in_database_name)
return self
def build(self) -> 'EnvironmentSettings':
"""
Returns an immutable instance of EnvironmentSettings.
:return: an immutable instance of EnvironmentSettings.
"""
gateway = get_gateway()
context_classloader = gateway.jvm.Thread.currentThread().getContextClassLoader()
new_classloader = create_url_class_loader([], context_classloader)
gateway.jvm.Thread.currentThread().setContextClassLoader(new_classloader)
return EnvironmentSettings(self._j_builder.build())
def __init__(self, j_environment_settings):
self._j_environment_settings = j_environment_settings
def get_built_in_catalog_name(self) -> str:
"""
Gets the specified name of the initial catalog to be created when instantiating a
:class:`~pyflink.table.TableEnvironment`.
:return: The specified name of the initial catalog to be created.
"""
return self._j_environment_settings.getBuiltInCatalogName()
def get_built_in_database_name(self) -> str:
"""
Gets the specified name of the default database in the initial catalog to be created when
instantiating a :class:`~pyflink.table.TableEnvironment`.
:return: The specified name of the default database in the initial catalog to be created.
"""
return self._j_environment_settings.getBuiltInDatabaseName()
def is_streaming_mode(self) -> bool:
"""
Tells if the :class:`~pyflink.table.TableEnvironment` should work in a batch or streaming
mode.
:return: True if the TableEnvironment should work in a streaming mode, false otherwise.
"""
return self._j_environment_settings.isStreamingMode()
def to_configuration(self) -> Configuration:
"""
Convert to `pyflink.common.Configuration`.
:return: Configuration with specified value.
.. note:: Deprecated in 1.15. Please use
:func:`EnvironmentSettings.get_configuration` instead.
"""
warnings.warn("Deprecated in 1.15.", DeprecationWarning)
return Configuration(j_configuration=self._j_environment_settings.toConfiguration())
def get_configuration(self) -> Configuration:
"""
Get the underlying `pyflink.common.Configuration`.
:return: Configuration with specified value.
"""
return Configuration(j_configuration=self._j_environment_settings.getConfiguration())
@staticmethod
def new_instance() -> 'EnvironmentSettings.Builder':
"""
Creates a builder for creating an instance of EnvironmentSettings.
:return: A builder of EnvironmentSettings.
"""
return EnvironmentSettings.Builder()
@staticmethod
def from_configuration(config: Configuration) -> 'EnvironmentSettings':
"""
Creates the EnvironmentSetting with specified Configuration.
:return: EnvironmentSettings.
.. note:: Deprecated in 1.15. Please use
:func:`EnvironmentSettings.Builder.with_configuration` instead.
"""
warnings.warn("Deprecated in 1.15.", DeprecationWarning)
gateway = get_gateway()
context_classloader = gateway.jvm.Thread.currentThread().getContextClassLoader()
new_classloader = create_url_class_loader([], context_classloader)
gateway.jvm.Thread.currentThread().setContextClassLoader(new_classloader)
return EnvironmentSettings(
get_gateway().jvm.EnvironmentSettings.fromConfiguration(config._j_configuration))
@staticmethod
def in_streaming_mode() -> 'EnvironmentSettings':
"""
Creates a default instance of EnvironmentSettings in streaming execution mode.
In this mode, both bounded and unbounded data streams can be processed.
This method is a shortcut for creating a :class:`~pyflink.table.TableEnvironment` with
little code. Use the builder provided in :func:`EnvironmentSettings.new_instance` for
advanced settings.
:return: EnvironmentSettings.
"""
return EnvironmentSettings.new_instance().in_streaming_mode().build()
@staticmethod
def in_batch_mode() -> 'EnvironmentSettings':
"""
Creates a default instance of EnvironmentSettings in batch execution mode.
This mode is highly optimized for batch scenarios. Only bounded data streams can be
processed in this mode.
This method is a shortcut for creating a :class:`~pyflink.table.TableEnvironment` with
little code. Use the builder provided in :func:`EnvironmentSettings.new_instance` for
advanced settings.
:return: EnvironmentSettings.
"""
return EnvironmentSettings.new_instance().in_batch_mode().build()
| 10,112 | 40.109756 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/table/window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from py4j.java_gateway import get_method
from pyflink.java_gateway import get_gateway
from pyflink.table import Expression
from pyflink.table.expression import _get_java_expression
__all__ = [
'Tumble',
'Session',
'Slide',
'Over',
'GroupWindow',
'OverWindow'
]
from pyflink.table.utils import to_expression_jarray
class GroupWindow(object):
"""
A group window specification.
Group windows group rows based on time or row-count intervals and is therefore essentially a
special type of groupBy. Just like groupBy, group windows allow to compute aggregates
on groups of elements.
Infinite streaming tables can only be grouped into time or row intervals. Hence window
grouping is required to apply aggregations on streaming tables.
For finite batch tables, group windows provide shortcuts for time-based groupBy.
"""
def __init__(self, java_window):
self._java_window = java_window
class Tumble(object):
"""
Helper class for creating a tumbling window. Tumbling windows are consecutive, non-overlapping
windows of a specified fixed length. For example, a tumbling window of 5 minutes size groups
elements in 5 minutes intervals.
Example:
::
>>> from pyflink.table.expressions import col, lit
>>> Tumble.over(lit(10).minutes) \\
... .on(col("rowtime")) \\
... .alias("w")
"""
@classmethod
def over(cls, size: Expression) -> 'TumbleWithSize':
"""
Creates a tumbling window. Tumbling windows are fixed-size, consecutive, non-overlapping
windows of a specified fixed length. For example, a tumbling window of 5 minutes size
groups elements in 5 minutes intervals.
:param size: The size of the window as time or row-count interval.
:return: A partially defined tumbling window.
"""
return TumbleWithSize(get_gateway().jvm.Tumble.over(_get_java_expression(size)))
class TumbleWithSize(object):
"""
Tumbling window.
For streaming tables you can specify grouping by a event-time or processing-time attribute.
For batch tables you can specify grouping on a timestamp or long attribute.
"""
def __init__(self, java_window):
self._java_window = java_window
def on(self, time_field: Expression) -> 'TumbleWithSizeOnTime':
"""
Specifies the time attribute on which rows are grouped.
For streaming tables you can specify grouping by a event-time or processing-ti
attribute.
For batch tables you can specify grouping on a timestamp or long attribute.
:param time_field: Time attribute for streaming and batch tables.
:return: A tumbling window on event-time/processing-time.
"""
return TumbleWithSizeOnTime(self._java_window.on(_get_java_expression(time_field)))
class TumbleWithSizeOnTime(object):
"""
Tumbling window on time. You need to assign an alias for the window.
"""
def __init__(self, java_window):
self._java_window = java_window
def alias(self, alias: str) -> 'GroupWindow':
"""
Assigns an alias for this window that the following
:func:`~pyflink.table.GroupWindowedTable.group_by` and
:func:`~pyflink.table.WindowGroupedTable.select` clause can refer to.
:func:`~pyflink.table.WindowGroupedTable.select` statement can access window properties
such as window start or end time.
:param alias: Alias for this window.
:return: This window.
"""
return GroupWindow(get_method(self._java_window, "as")(alias))
class Session(object):
"""
Helper class for creating a session window. The boundary of session windows are defined by
intervals of inactivity, i.e., a session window is closes if no event appears for a defined
gap period.
Example:
::
>>> from pyflink.table.expressions import col, lit
>>> Session.with_gap(lit(10).minutes) \\
... .on(col("rowtime")) \\
... .alias("w")
"""
@classmethod
def with_gap(cls, gap: Expression) -> 'SessionWithGap':
"""
Creates a session window. The boundary of session windows are defined by
intervals of inactivity, i.e., a session window is closes if no event appears for a defined
gap period.
:param gap: Specifies how long (as interval of milliseconds) to wait for new data before
closing the session window.
:return: A partially defined session window.
"""
return SessionWithGap(get_gateway().jvm.Session.withGap(_get_java_expression(gap)))
class SessionWithGap(object):
"""
Session window.
For streaming tables you can specify grouping by a event-time or processing-time attribute.
For batch tables you can specify grouping on a timestamp or long attribute.
"""
def __init__(self, java_window):
self._java_window = java_window
def on(self, time_field: Expression) -> 'SessionWithGapOnTime':
"""
Specifies the time attribute on which rows are grouped.
For streaming tables you can specify grouping by a event-time or processing-time
attribute.
For batch tables you can specify grouping on a timestamp or long attribute.
:param time_field: Time attribute for streaming and batch tables.
:return: A tumbling window on event-time.
"""
return SessionWithGapOnTime(self._java_window.on(_get_java_expression(time_field)))
class SessionWithGapOnTime(object):
"""
Session window on time. You need to assign an alias for the window.
"""
def __init__(self, java_window):
self._java_window = java_window
def alias(self, alias: str) -> 'GroupWindow':
"""
Assigns an alias for this window that the following
:func:`~pyflink.table.GroupWindowedTable.group_by` and
:func:`~pyflink.table.WindowGroupedTable.select` clause can refer to.
:func:`~pyflink.table.WindowGroupedTable.select` statement can access window properties
such as window start or end time.
:param alias: Alias for this window.
:return: This window.
"""
return GroupWindow(get_method(self._java_window, "as")(alias))
class Slide(object):
"""
Helper class for creating a sliding window. Sliding windows have a fixed size and slide by
a specified slide interval. If the slide interval is smaller than the window size, sliding
windows are overlapping. Thus, an element can be assigned to multiple windows.
For example, a sliding window of size 15 minutes with 5 minutes sliding interval groups
elements of 15 minutes and evaluates every five minutes. Each element is contained in three
consecutive window evaluations.
Example:
::
>>> from pyflink.table.expressions import col, lit
>>> Slide.over(lit(10).minutes) \\
... .every(lit(5).minutes) \\
... .on(col("rowtime")) \\
... .alias("w")
"""
@classmethod
def over(cls, size: Expression) -> 'SlideWithSize':
"""
Creates a sliding window. Sliding windows have a fixed size and slide by
a specified slide interval. If the slide interval is smaller than the window size, sliding
windows are overlapping. Thus, an element can be assigned to multiple windows.
For example, a sliding window of size 15 minutes with 5 minutes sliding interval groups
elements of 15 minutes and evaluates every five minutes. Each element is contained in three
consecutive window evaluations.
:param size: The size of the window as time or row-count interval.
:return: A partially specified sliding window.
"""
return SlideWithSize(get_gateway().jvm.Slide.over(_get_java_expression(size)))
class SlideWithSize(object):
"""
Partially specified sliding window. The size of the window either as time or row-count
interval.
"""
def __init__(self, java_window):
self._java_window = java_window
def every(self, slide: Expression) -> 'SlideWithSizeAndSlide':
"""
Specifies the window's slide as time or row-count interval.
The slide determines the interval in which windows are started. Hence, sliding windows can
overlap if the slide is smaller than the size of the window.
For example, you could have windows of size 15 minutes that slide by 3 minutes. With this
15 minutes worth of elements are grouped every 3 minutes and each row contributes to 5
windows.
:param slide: The slide of the window either as time or row-count interval.
:return: A sliding window.
"""
return SlideWithSizeAndSlide(self._java_window.every(_get_java_expression(slide)))
class SlideWithSizeAndSlide(object):
"""
Sliding window. The size of the window either as time or row-count interval.
For streaming tables you can specify grouping by a event-time or processing-time attribute.
For batch tables you can specify grouping on a timestamp or long attribute.
"""
def __init__(self, java_window):
self._java_window = java_window
def on(self, time_field: Expression) -> 'SlideWithSizeAndSlideOnTime':
"""
Specifies the time attribute on which rows are grouped.
For streaming tables you can specify grouping by a event-time or processing-time
attribute.
For batch tables you can specify grouping on a timestamp or long attribute.
"""
return SlideWithSizeAndSlideOnTime(self._java_window.on(_get_java_expression(time_field)))
class SlideWithSizeAndSlideOnTime(object):
"""
Sliding window on time. You need to assign an alias for the window.
"""
def __init__(self, java_window):
self._java_window = java_window
def alias(self, alias: str) -> 'GroupWindow':
"""
Assigns an alias for this window that the following
:func:`~pyflink.table.GroupWindowedTable.group_by` and
:func:`~pyflink.table.WindowGroupedTable.select` clause can refer to.
:func:`~pyflink.table.WindowGroupedTable.select` statement can access window properties
such as window start or end time.
:param alias: Alias for this window.
:return: This window.
"""
return GroupWindow(get_method(self._java_window, "as")(alias))
class Over(object):
"""
Helper class for creating an over window. Similar to SQL, over window aggregates compute an
aggregate for each input row over a range of its neighboring rows.
Over-windows for batch tables are currently not supported.
Example:
::
>>> from pyflink.table.expressions import col, UNBOUNDED_RANGE
>>> Over.partition_by(col("a")) \\
... .order_by(col("rowtime")) \\
... .preceding(UNBOUNDED_RANGE) \\
... .alias("w")
"""
@classmethod
def order_by(cls, order_by: Expression) -> 'OverWindowPartitionedOrdered':
"""
Specifies the time attribute on which rows are ordered.
For streaming tables, reference a rowtime or proctime time attribute here
to specify the time mode.
:param order_by: Field reference.
:return: An over window with defined order.
"""
return OverWindowPartitionedOrdered(get_gateway().jvm.Over.orderBy(
_get_java_expression(order_by)))
@classmethod
def partition_by(cls, *partition_by: Expression) -> 'OverWindowPartitioned':
"""
Partitions the elements on some partition keys.
Each partition is individually sorted and aggregate functions are applied to each
partition separately.
:param partition_by: List of field references.
:return: An over window with defined partitioning.
"""
return OverWindowPartitioned(get_gateway().jvm.Over.partitionBy(
to_expression_jarray(partition_by)))
class OverWindowPartitionedOrdered(object):
"""
Partially defined over window with (optional) partitioning and order.
"""
def __init__(self, java_over_window):
self._java_over_window = java_over_window
def alias(self, alias: str) -> 'OverWindow':
"""
Set the preceding offset (based on time or row-count intervals) for over window.
:param alias: Preceding offset relative to the current row.
:return: An over window with defined preceding.
"""
return OverWindow(get_method(self._java_over_window, "as")(alias))
def preceding(self, preceding: Expression) -> 'OverWindowPartitionedOrderedPreceding':
"""
Set the preceding offset (based on time or row-count intervals) for over window.
:param preceding: Preceding offset relative to the current row.
:return: An over window with defined preceding.
"""
return OverWindowPartitionedOrderedPreceding(
self._java_over_window.preceding(_get_java_expression(preceding)))
class OverWindowPartitionedOrderedPreceding(object):
"""
Partially defined over window with (optional) partitioning, order, and preceding.
"""
def __init__(self, java_over_window):
self._java_over_window = java_over_window
def alias(self, alias: str) -> 'OverWindow':
"""
Assigns an alias for this window that the following
:func:`~pyflink.table.OverWindowedTable.select` clause can refer to.
:param alias: Alias for this over window.
:return: The fully defined over window.
"""
return OverWindow(get_method(self._java_over_window, "as")(alias))
def following(self, following: Expression) -> 'OverWindowPartitionedOrderedPreceding':
"""
Set the following offset (based on time or row-count intervals) for over window.
:param following: Following offset that relative to the current row.
:return: An over window with defined following.
"""
return OverWindowPartitionedOrderedPreceding(
self._java_over_window.following(_get_java_expression(following)))
class OverWindowPartitioned(object):
"""
Partially defined over window with partitioning.
"""
def __init__(self, java_over_window):
self._java_over_window = java_over_window
def order_by(self, order_by: Expression) -> 'OverWindowPartitionedOrdered':
"""
Specifies the time attribute on which rows are ordered.
For streaming tables, reference a rowtime or proctime time attribute here
to specify the time mode.
For batch tables, refer to a timestamp or long attribute.
:param order_by: Field reference.
:return: An over window with defined order.
"""
return OverWindowPartitionedOrdered(self._java_over_window.orderBy(
_get_java_expression(order_by)))
class OverWindow(object):
"""
An over window specification.
Similar to SQL, over window aggregates compute an aggregate for each input row over a range
of its neighboring rows.
"""
def __init__(self, java_over_window):
self._java_over_window = java_over_window
| 16,326 | 34.883516 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/table/serializers.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import io
from pyflink.serializers import IterableSerializer
from pyflink.table.utils import arrow_to_pandas, pandas_to_arrow
class ArrowSerializer(IterableSerializer):
"""
Serializes pandas.Series into Arrow streaming format data.
"""
def __init__(self, schema, row_type, timezone):
super(ArrowSerializer, self).__init__()
self._schema = schema
self._field_types = row_type.field_types()
self._timezone = timezone
def __repr__(self):
return "ArrowSerializer"
def serialize(self, iterable, stream):
writer = None
try:
for cols in iterable:
batch = pandas_to_arrow(self._schema, self._timezone, self._field_types, cols)
if writer is None:
import pyarrow as pa
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def deserialize(self, stream):
import pyarrow as pa
reader = pa.ipc.open_stream(stream)
for batch in reader:
yield arrow_to_pandas(self._timezone, self._field_types, [batch])
def load_from_iterator(self, iter):
class IteratorIO(io.RawIOBase):
def __init__(self, iter):
super(IteratorIO, self).__init__()
self.iter = iter
self.leftover = None
def readable(self):
return True
def readinto(self, b):
output_buffer_len = len(b)
input = self.leftover or (self.iter.next() if self.iter.hasNext() else None)
if input is None:
return 0
output, self.leftover = input[:output_buffer_len], input[output_buffer_len:]
b[:len(output)] = output
return len(output)
import pyarrow as pa
reader = pa.ipc.open_stream(
io.BufferedReader(IteratorIO(iter), buffer_size=io.DEFAULT_BUFFER_SIZE))
for batch in reader:
yield batch
| 3,095 | 37.7 | 94 |
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.