repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
hyperas
|
hyperas-master/examples/use_intermediate_functions.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
import matplotlib.pyplot as plt
def visualization_mnist(x_data,n=10):
plt.figure(figsize=(20, 4))
for i in range(n):
# display digit
ax = plt.subplot(1, n, i+1)
plt.imshow(x_data[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
def data():
'''
Data providing function:
This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
'''
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
visualization_mnist(X_test)
X_train /= 255
X_test /= 255
nb_classes = 10
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
'''
Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
    A third key is optional, though recommended:
- model: specify the model just created so that we can later use it again.
'''
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([256, 512, 1024])}}))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size={{choice([64, 128])}},
nb_epoch=1,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
X_train, Y_train, X_test, Y_test = data()
functions=[visualization_mnist]
best_run, best_model = optim.minimize(model=model,
data=data,
functions=functions,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
| 3,226 | 32.968421 | 87 |
py
|
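As a rough illustration of the mechanism in the example above: hyperas replaces each double-curly-brace template with a hyperopt distribution before running the search. The sketch below hand-builds an equivalent hyperopt space; the parameter labels ('dropout_1', 'dense_units', 'dropout_2', 'batch_size') are assumed names chosen for illustration, not hyperas internals.

# Sketch only: a hand-built hyperopt space equivalent to the templates above.
from hyperopt import hp

space = {
    'dropout_1': hp.uniform('dropout_1', 0, 1),                   # {{uniform(0, 1)}}
    'dense_units': hp.choice('dense_units', [256, 512, 1024]),    # {{choice([256, 512, 1024])}}
    'dropout_2': hp.uniform('dropout_2', 0, 1),
    'batch_size': hp.choice('batch_size', [64, 128]),
}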
hyperas
|
hyperas-master/examples/hyperas_in_intermediate_fns.py
|
import numpy
import random
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Input, Flatten, Dense, Dropout, Lambda
from keras.optimizers import RMSprop
from keras import backend as K
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
def euclidean_distance(vects):
x, y = vects
return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def create_pairs(x, digit_indices):
num_classes = 10
pairs = []
labels = []
n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
for d in range(num_classes):
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[x[z1], x[z2]]]
inc = random.randrange(1, num_classes)
dn = (d + inc) % num_classes
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[x[z1], x[z2]]]
labels += [1, 0]
return numpy.array(pairs), numpy.array(labels)
def create_base_network(input_shape,dense_filter1,dense_filter2,dense_filter3,dropout1,dropout2):
input = Input(shape=input_shape)
x = Flatten()(input)
x = Dense(dense_filter1, activation='relu')(x)
x = Dropout(dropout1)(x)
x = Dense(dense_filter2, activation='relu')(x)
x = Dropout(dropout2)(x)
x = Dense(dense_filter3, activation='relu')(x)
return Model(input, x)
def compute_accuracy(y_true, y_pred):
pred = y_pred.ravel() < 0.5
return numpy.mean(pred == y_true)
def accuracy(y_true, y_pred):
return K.mean(K.equal(y_true, K.cast(y_pred < 0.5, y_true.dtype)))
def process_data():
num_classes = 10
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
input_shape = x_train.shape[1:]
# create training+test positive and negative pairs
digit_indices = [numpy.where(y_train == i)[0] for i in range(num_classes)]
tr_pairs, tr_y = create_pairs(x_train, digit_indices)
digit_indices = [numpy.where(y_test == i)[0] for i in range(num_classes)]
te_pairs, te_y = create_pairs(x_test, digit_indices)
return tr_pairs, tr_y, te_pairs, te_y,input_shape
def data():
tr_pairs, tr_y, te_pairs, te_y,input_shape = process_data()
return tr_pairs, tr_y, te_pairs, te_y,input_shape
def contrastive_loss(y_true, y_pred):
margin = 1
return K.mean(y_true * K.square(y_pred) +
(1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))
def create_model(tr_pairs, tr_y, te_pairs, te_y,input_shape):
epochs = 20
dropout1 = {{uniform(0,1)}}
dropout2 = {{uniform(0,1)}}
dense_filter1 = {{choice([64,128,256])}}
dense_filter2 = {{choice([64,128,256])}}
dense_filter3 = {{choice([64,128,256])}}
# network definition
base_network = create_base_network(input_shape,dense_filter1,dense_filter2,dense_filter3,dropout1,dropout2)
input_a = Input(shape=input_shape)
input_b = Input(shape=input_shape)
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance,
output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model([input_a, input_b], distance)
rms = RMSprop()
model.compile(loss=contrastive_loss, optimizer=rms, metrics=[accuracy])
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
batch_size=128,
epochs=epochs,
verbose=1,
validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))
y_pred = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
tr_acc = compute_accuracy(tr_y, y_pred)
y_pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
te_acc = compute_accuracy(te_y, y_pred)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
return {'loss': -te_acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
tr_pairs, tr_y, te_pairs, te_y,input_shape = data()
best_run, best_model = optim.minimize(model=create_model, data=data,
functions = [process_data,create_base_network,euclidean_distance,contrastive_loss,eucl_dist_output_shape,create_pairs,accuracy,compute_accuracy],
algo=tpe.suggest,max_evals=100,trials=Trials())
print("best model",best_model)
print("best run",best_run)
print("Evalutation of best performing model:")
loss,te_acc = best_model.evaluate([te_pairs[:, 0], te_pairs[:, 1]], te_y)
print("best prediction accuracy on test data %0.2f%%" % (100 * te_acc))
| 4,820 | 35.801527 | 149 |
py
|
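For clarity on the objective in the siamese example above, here is a minimal numpy sketch of the contrastive loss (margin = 1) and the distance-below-0.5 accuracy rule used by compute_accuracy; the input values are arbitrary and chosen only for illustration.

# Numpy sketch of the contrastive loss and the 0.5-distance accuracy threshold above.
import numpy as np

def contrastive_loss_np(y_true, d, margin=1.0):
    # y_true: 1 for same-class pairs, 0 for different-class pairs; d: predicted distances
    return np.mean(y_true * d ** 2 + (1 - y_true) * np.maximum(margin - d, 0) ** 2)

y = np.array([1, 1, 0, 0])
d = np.array([0.1, 0.4, 0.9, 0.2])
print(contrastive_loss_np(y, d))   # 0.205: small distances on positive pairs keep the loss low
print(np.mean((d < 0.5) == y))     # 0.75, same rule as compute_accuracy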
hyperas
|
hyperas-master/examples/cnn_lstm.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, rand
from hyperas import optim
from hyperas.distributions import uniform, choice
import numpy as np
from keras.preprocessing import sequence
from keras.datasets import imdb
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.layers.convolutional import Convolution1D, MaxPooling1D
def data():
np.random.seed(1337) # for reproducibility
max_features = 20000
maxlen = 100
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=max_features)
X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
return X_train, X_test, y_train, y_test, maxlen, max_features
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
embedding_size = 300
pool_length = 4
lstm_output_size = 100
batch_size = 200
nb_epoch = 1
model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=maxlen))
model.add(Dropout({{uniform(0, 1)}}))
# Note that we use unnamed parameters here, which is bad style, but is used here
# to demonstrate that it works. Always prefer named parameters.
model.add(Convolution1D({{choice([64, 128])}},
{{choice([6, 8])}},
border_mode='valid',
activation='relu',
subsample_length=1))
model.add(MaxPooling1D(pool_length=pool_length))
model.add(LSTM(lstm_output_size))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print('Train...')
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
validation_data=(X_test, y_test))
score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
best_run, best_model = optim.minimize(model=model,
data=data,
algo=rand.suggest,
max_evals=5,
trials=Trials())
print(best_run)
| 2,541 | 35.84058 | 84 |
py
|
hyperas
|
hyperas-master/examples/cifar_generator_cnn.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from keras.datasets import cifar10
from keras.utils import np_utils
def data():
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# this will do preprocessing and realtime data augmentation
datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.1, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
vertical_flip=False) # randomly flip images
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(X_train)
return datagen, X_train, Y_train, X_test, Y_test
def model(datagen, X_train, Y_train, X_test, Y_test):
batch_size = 32
nb_epoch = 200
# input image dimensions
img_rows, img_cols = 32, 32
# the CIFAR10 images are RGB
img_channels = 3
model = Sequential()
model.add(Convolution2D(32, 3, 3, border_mode='same',
input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Convolution2D(32, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Convolution2D(64, 3, 3, border_mode='same'))
model.add(Activation('relu'))
model.add(Convolution2D(64, 3, 3))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# let's train the model using SGD + momentum (how original).
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
optimizer=sgd,
metrics=['accuracy'])
# fit the model on the batches generated by datagen.flow()
model.fit_generator(datagen.flow(X_train, Y_train,
batch_size=batch_size),
samples_per_epoch=X_train.shape[0],
nb_epoch=nb_epoch,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
if __name__ == '__main__':
datagen, X_train, Y_train, X_test, Y_test = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=5,
trials=Trials())
print("Evalutation of best performing model:")
print(best_model.evaluate(X_test, Y_test))
| 4,262 | 36.394737 | 94 |
py
|
hyperas
|
hyperas-master/tests/test_functional_api.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice
from keras.models import Model
from keras.layers import Dense, Input
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
def data():
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
nb_classes = 10
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
inputs = Input(shape=(784,))
x = Dense({{choice([20, 30, 40])}}, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size={{choice([64, 128])}},
epochs=1,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def model_multi_line_arguments(X_train, Y_train,
X_test, Y_test):
inputs = Input(shape=(784,))
x = Dense({{choice([20, 30, 40])}}, activation='relu')(inputs)
x = Dense(64, activation='relu')(x)
predictions = Dense(10, activation='softmax')(x)
model = Model(inputs=inputs, outputs=predictions)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size={{choice([64, 128])}},
epochs=1,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def test_functional_api():
X_train, Y_train, X_test, Y_test = data()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=1,
trials=Trials(),
verbose=False)
best_run, best_model = optim.minimize(model=model_multi_line_arguments,
data=data,
algo=tpe.suggest,
max_evals=1,
trials=Trials(),
verbose=False)
| 3,058 | 35.416667 | 90 |
py
|
hyperas
|
hyperas-master/tests/test_lr_plateau.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.datasets import mnist
from keras.utils import np_utils
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
def data():
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
nb_classes = 10
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
return x_train, y_train, x_test, y_test
def create_model(x_train, y_train, x_test, y_test):
model = Sequential()
model.add(Dense(44, input_shape=(784,)))
model.add(Activation({{choice(['relu', 'sigmoid'])}}))
model.add(Dense(44))
model.add(Activation({{choice(['relu', 'sigmoid'])}}))
model.add(Dense(10))
model.compile(loss='mae', metrics=['mse'], optimizer="adam")
es = EarlyStopping(monitor='val_loss', min_delta=1e-5, patience=10)
rlr = ReduceLROnPlateau(factor=0.1, patience=10)
_ = model.fit(x_train, y_train, epochs=1, verbose=0, callbacks=[es, rlr],
batch_size=24, validation_data=(x_test, y_test))
mae, mse = model.evaluate(x_test, y_test, verbose=0)
print('MAE:', mae)
return {'loss': mae, 'status': STATUS_OK, 'model': model}
def test_advanced_callbacks():
X_train, Y_train, X_test, Y_test = data()
best_run, best_model = optim.minimize(model=create_model,
data=data,
algo=tpe.suggest,
max_evals=1,
trials=Trials(),
verbose=False)
| 2,006 | 34.839286 | 77 |
py
|
hyperas
|
hyperas-master/tests/test_distributions.py
| 0 | 0 | 0 |
py
|
|
hyperas
|
hyperas-master/tests/test_e2e.py
|
from __future__ import print_function
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
from hyperas.distributions import choice, uniform
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
from keras.datasets import mnist
from keras.utils import np_utils
from hyperopt import rand
def data():
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
nb_classes = 10
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def model(X_train, Y_train, X_test, Y_test):
model = Sequential()
model.add(Dense(50, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([20, 30, 40])}}))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size={{choice([64, 128])}},
epochs=1,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def test_simple():
X_train, Y_train, X_test, Y_test = data()
trials = Trials()
best_run, best_model = optim.minimize(model=model,
data=data,
algo=tpe.suggest,
max_evals=1,
trials=trials,
verbose=False)
def ensemble_data():
nb_classes = 10
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, X_test, Y_train, Y_test
def ensemble_model(X_train, X_test, Y_train, Y_test):
model = Sequential()
model.add(Dense(512, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense({{choice([400, 512, 600])}}))
model.add(Activation('relu'))
model.add(Dropout({{uniform(0, 1)}}))
model.add(Dense(10))
model.add(Activation('softmax'))
rms = RMSprop()
model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['accuracy'])
nb_epoch = 10
batch_size = 128
model.fit(X_train, Y_train,
batch_size=batch_size, nb_epoch=nb_epoch,
verbose=2,
validation_data=(X_test, Y_test))
score, acc = model.evaluate(X_test, Y_test, verbose=0)
return {'loss': -acc, 'status': STATUS_OK, 'model': model}
def test_ensemble():
X_train, X_test, Y_train, Y_test = data()
optim.best_ensemble(nb_ensemble_models=2,
model=model,
data=data,
algo=rand.suggest,
max_evals=1,
trials=Trials(),
voting='hard')
| 3,714 | 31.587719 | 87 |
py
|
hyperas
|
hyperas-master/tests/test_ensemble.py
| 0 | 0 | 0 |
py
|
|
hyperas
|
hyperas-master/tests/test_utils.py
|
import os
from hyperopt import hp
from hyperas.utils import (
extract_imports, remove_imports, remove_all_comments, temp_string,
write_temp_files, with_line_numbers, determine_indent, unpack_hyperopt_vals,
eval_hyperopt_space, find_signature_end)
TEST_SOURCE = """
from __future__ import print_function
from sys import path
from os import walk as walk2
import os
import sys # ignore this comment
''' remove me '''
# import nocomment
from java.lang import stuff
from _pydev_ import stuff
from os.path import splitext as split
import os.path.splitext as sp
"""
TEST_SOURCE_2 = """
import sys
foo_bar()
"""
TEST_SOURCE_3 = """
def foo():
# a comment in a function
import sys
bar()
"""
TEST_SOURCE_4 = """
@foo_bar(bar_foo)
def foo(train_x=')\\':', train_y=")\\":", # ):
test_x=lambda x: bar, test_y=bar[:, 0],
foo='''
):):
\\'''', bar="") :
pass
"""
def test_extract_imports():
result = extract_imports(TEST_SOURCE)
assert 'java.lang' not in result
assert 'nocomment' not in result
assert '_pydev_' not in result
assert 'try:\n import os\nexcept:\n pass\n' in result
assert 'from sys import path' in result
assert 'from os import walk as walk2' in result
assert 'ignore' not in result
assert 'remove me' not in result
assert 'from __future__ import print_function' in result
assert 'from os.path import splitext as split' in result
assert 'import os.path.splitext as sp' in result
def test_remove_imports():
result = remove_imports(TEST_SOURCE_2)
assert 'foo_bar()' in result
def test_remove_imports_in_function():
result = remove_imports(TEST_SOURCE_3)
# test function should have 3 lines (including the comment)
assert len(result.split('\n')[1:-1]) == 3
assert 'def foo():' in result
assert '# a comment in a function' in result
assert 'bar()' in result
def test_remove_all_comments():
result = remove_all_comments(TEST_SOURCE)
assert 'ignore' not in result
assert 'nocomment' not in result
assert 'remove me' not in result
assert 'import sys' in result
def test_temp_string():
imports = 'imports\n'
model = 'model\n'
data = 'data\n'
functions = 'functions\n'
space = 'space'
result = temp_string(imports, model, data, functions, space)
assert result == "imports\nfrom hyperopt import fmin, tpe, hp, STATUS_OK, Trials\n" \
"functions\ndata\nmodel\n\nspace"
def test_write_temp_files():
string = 'foo_bar'
temp_file = './temp.py'
write_temp_files(string, temp_file)
assert os.path.isfile(temp_file)
os.remove(temp_file)
def test_with_line_numbers():
code = "def do_stuff(x):\n foo"
result = with_line_numbers(code)
print(result)
assert result == " 1: def do_stuff(x):\n 2: foo"
def test_determine_indent():
code = "def do_stuff(x):\n foo"
assert determine_indent(code) == ' '
code = "def do_stuff(x):\n foo"
assert determine_indent(code) == ' '
code = "def do_stuff(x):\n\tfoo"
assert determine_indent(code) == '\t'
def test_unpack_hyperopt_vals():
test_vals = {
'filters_conv_A': [0],
'filters_conv_B': [1],
'rate': [0.1553971698387464],
'units': [1],
'rate_1': [0.4114807190252343],
'lr': [2.0215692016654265e-05],
'momentum': [2],
'nesterov': [0]
}
result = {
'filters_conv_A': 0,
'filters_conv_B': 1,
'rate': 0.1553971698387464,
'units': 1,
'rate_1': 0.4114807190252343,
'lr': 2.0215692016654265e-05,
'momentum': 2,
'nesterov': 0
}
assert unpack_hyperopt_vals(test_vals) == result
def test_eval_hyperopt_space():
space = {
'filters_conv_A': hp.choice('filters_conv_A', [8, 16]),
'filters_conv_B': hp.choice('filters_conv_B', [16, 24]),
'rate': hp.uniform('rate', 0, 1),
'units': hp.choice('units', [96, 128, 192]),
'rate_1': hp.uniform('rate_1', 0, 1),
'lr': hp.uniform('lr', 1e-5, 1e-4),
'momentum': hp.choice('momentum', [0.5, 0.9, 0.999]),
'nesterov': hp.choice('nesterov', [True, False])
}
test_vals = {
'filters_conv_A': [0],
'filters_conv_B': [1],
'rate': [0.1553971698387464],
'units': [1],
'rate_1': [0.4114807190252343],
'lr': [2.0215692016654265e-05],
'momentum': [2],
'nesterov': [0]
}
test_vals_unpacked = {
'filters_conv_A': 0,
'filters_conv_B': 1,
'rate': 0.1553971698387464,
'units': 1,
'rate_1': 0.4114807190252343,
'lr': 2.0215692016654265e-05,
'momentum': 2,
'nesterov': 0
}
result = {
'filters_conv_A': 8,
'filters_conv_B': 24,
'rate': 0.1553971698387464,
'units': 128,
'rate_1': 0.4114807190252343,
'lr': 2.0215692016654265e-05,
'momentum': 0.999,
'nesterov': True
}
assert eval_hyperopt_space(space, test_vals) == result
assert eval_hyperopt_space(space, test_vals_unpacked) == result
def test_find_signature_end():
index = find_signature_end(TEST_SOURCE_4)
    assert index == len(TEST_SOURCE_4) - 10
| 5,277 | 26.778947 | 89 |
py
|
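The eval_hyperopt_space helper exercised above maps the index-encoded values hyperopt records for hp.choice back onto the real values. A minimal sketch of that usage, with a two-parameter space assumed for illustration and values in the same list-wrapped format as the test above:

# Sketch: recovering real hyperparameter values from hyperopt's stored choice indices.
from hyperopt import hp
from hyperas.utils import eval_hyperopt_space

space = {
    'units': hp.choice('units', [96, 128, 192]),
    'rate': hp.uniform('rate', 0, 1),
}
vals = {'units': [1], 'rate': [0.25]}       # choice parameters are stored as indices
print(eval_hyperopt_space(space, vals))     # {'units': 128, 'rate': 0.25}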
hyperas
|
hyperas-master/tests/test_optim.py
|
from keras.datasets import mnist
from keras.utils import np_utils
from hyperas.optim import retrieve_data_string
def test_data():
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
nb_classes_return = 10
Y_train = np_utils.to_categorical(y_train, nb_classes_return)
Y_test = np_utils.to_categorical(y_test, nb_classes_return)
return X_train, Y_train, X_test, Y_test
def test_data_function():
result = retrieve_data_string(test_data, verbose=False)
assert 'return X_train, Y_train, X_test, Y_test' not in result
assert 'def data():' not in result
assert 'nb_classes_return = 10' in result
assert '(X_train, y_train), (X_test, y_test) = mnist.load_data()' in result
assert 'Y_test = np_utils.to_categorical(y_test, nb_classes_return)' in result
if __name__ == '__main__':
test_data_function()
| 1,049 | 31.8125 | 82 |
py
|
autoagora-agents
|
autoagora-agents-master/main.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import argparse
from sacred import SETTINGS
import experiment
from autoagora_agents import controller
from simulation import environment
# For good reason, sacred disallows modifying your config file in the code.
# However, our code does some clever stuff to make configs less verbose than they'd
# otherwise need to be, so we disable this check
SETTINGS.CONFIG.READ_ONLY_CONFIG = False # type: ignore
parser = argparse.ArgumentParser(description="Run experiments for autoagora")
parser.add_argument("-n", "--name")
parser.add_argument("-s", "--simulation_path", default="simulationconfig.py")
parser.add_argument("-a", "--algorithm_path", default="algorithmconfig.py")
parser.add_argument("-e", "--experiment_path", default="experimentconfig.py")
args = parser.parse_args()
ex = experiment.experiment(
name=args.name,
spath=args.simulation_path,
apath=args.algorithm_path,
epath=args.experiment_path,
)
@ex.automain
def main(_run):
# NOTE: The structure of this loop is very bandit-specific.
# This would not work for a more complex RL algorithm without
# modifications
seed = _run.config["experiment"]["seed"]
algs = controller(seed=seed) # type: ignore
env = environment(seed=seed) # type: ignore
for _ in range(env.nepisodes):
obs, act, rew, done = env.reset()
while not env.isfinished():
act = algs(observations=obs, actions=act, rewards=rew, dones=done)
algs.update()
obs, act, rew, done = env.step(actions=act)
| 1,600 | 33.804348 | 83 |
py
|
autoagora-agents
|
autoagora-agents-master/algorithmconfig.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from autoagora_agents import algorithm_ingredient
@algorithm_ingredient.config
def config():
agents = [
{
"kind": "ppobandit",
"group": "indexer",
"count": 1,
"bufferlength": 10,
"actiondistribution": {
"kind": "gaussian",
"initial_mean": [0.1],
"initial_stddev": [0.1],
"minmean": [0.0],
"maxmean": [2.0],
"minstddev": [1e-10],
"maxstddev": [1.0],
},
"optimizer": {"kind": "sgd", "lr": 0.01},
"ppoiterations": 2,
"epsclip": 0.01,
"entropycoeff": 1.0,
"pullbackstrength": 0.0,
"stddevfallback": True,
}
]
| 863 | 26 | 53 |
py
|
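A hedged sketch of how a config entry like the one above can be consumed outside of sacred, mirroring the calls in tests/autoagora_agents/test_algorithm.py further down; it assumes the agents list above is available as a plain Python list of dicts.

# Sketch (mirrors the algorithm tests below): building one agent from a config entry.
import numpy as np
from autoagora_agents import algorithm

agent = algorithm.algorithmgroupfactory(**agents[0])[0]
action = agent(observation=np.zeros(1), action=np.zeros(1), reward=1.0, done=False)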
autoagora-agents
|
autoagora-agents-master/experimentconfig.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from experiment import experiment_ingredient
@experiment_ingredient.config
def config():
seed = 0
| 180 | 17.1 | 44 |
py
|
autoagora-agents
|
autoagora-agents-master/simulationconfig.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from simulation import simulation_ingredient
@simulation_ingredient.config
def config():
nproducts = 1 # This is a convenience variable. Does not actually get used by the simulation.
ntimesteps = 10000
nepisodes = 1
distributor = {"kind": "softmax", "source": "consumer", "to": "indexer"}
entities = [
{
"kind": "entity",
"count": 1,
"group": "consumer",
"state": {
"kind": "budget",
"low": 0,
"high": 1,
"initial": 0.5 * np.ones(nproducts),
"traffic": np.ones(nproducts),
},
},
{
"kind": "agent",
"count": 1,
"group": "indexer",
"state": {
"kind": "price",
"low": np.zeros(nproducts),
"high": 3 * np.ones(nproducts),
"initial": np.ones(nproducts),
},
"action": {
"kind": "price",
"low": np.zeros(nproducts),
"high": 3 * np.ones(nproducts),
"shape": (nproducts,),
},
"reward": [
{
"kind": "traffic",
"multiplier": 1,
}
],
"observation": [
{
"kind": "bandit",
}
],
},
]
| 1,548 | 26.175439 | 98 |
py
|
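To make the link to the simulation explicit: a config like the one above is ultimately handed to the environment constructor. A minimal sketch mirroring tests/fixture.py further down, assuming the config is available as a plain dict with the keys shown above; the argument order follows that fixture and the seed is arbitrary.

# Sketch (mirrors tests/fixture.py): constructing the environment from a config dict.
from simulation import environment

env = environment(
    config["distributor"],
    config["entities"],
    config["ntimesteps"],
    config["nepisodes"],
    seed=0,
)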
autoagora-agents
|
autoagora-agents-master/experiment/array.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
def applybounds(
a: np.ndarray, l: float | np.ndarray, h: float | np.ndarray
) -> np.ndarray:
"""Set out of bounds values to be between bounds.
Bounds are inclusive.
Arguments:
a (np.ndarray): The array to which to apply bounds.
l (float | np.ndarray): The lower bound
h (float | np.ndarray): The upper bound
Returns:
np.ndarray: The input array with the out of bounds values set to be in bounds.
Raises:
        ValueError: If the length of the bounds doesn't equal the length of the array,
            when the bounds are given as arrays.
"""
return np.minimum(np.maximum(a, l), h)
| 741 | 26.481481 | 87 |
py
|
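applybounds above is just an element-wise clip. A quick usage sketch, reusing the same values as the tests further down:

# Sketch: applybounds clips values into [low, high] element-wise.
import numpy as np
from experiment import applybounds

print(applybounds(np.array([0, 2, 4]), 1, 3))   # [1 2 3]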
autoagora-agents
|
autoagora-agents-master/experiment/config.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import importlib.util
import sacred
def experiment(*, name: str, spath: str, apath: str, epath: str):
"""Create an experiment.
Keyword Arguments:
name (str): The name of the experiment
spath (str): The path to the python file containing the simulation config
        apath (str): The path to the python file containing the algorithm config
        epath (str): The path to the python file containing the experiment config
Returns:
sacred.Experiment: The constructed sacred experiment object.
"""
names = ("simulation_ingredient", "algorithm_ingredient", "experiment_ingredient")
paths = (spath, apath, epath)
ii = tuple(map(lambda n, p: importn(n, p), names, paths))
ex = sacred.Experiment(name, ingredients=ii)
return ex
def importn(n: str, p: str):
"""Import a given item from the python file at the specified path.
Arguments:
n (str): The name of the item to import
p (str): The path to the python file containing the item to import.
Returns:
sacred.Ingredient: The imported ingredient from the specified path
"""
# https://www.geeksforgeeks.org/how-to-import-a-python-module-given-the-full-path/
spec = importlib.util.spec_from_file_location(n, p)
mod = importlib.util.module_from_spec(spec) # type: ignore
spec.loader.exec_module(mod) # type: ignore
return getattr(mod, n)
| 1,479 | 33.418605 | 86 |
py
|
autoagora-agents
|
autoagora-agents-master/experiment/factory.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from typing import Any, Callable
def factory(n: str, d: dict[str, Callable], *args, **kwargs) -> Any:
"""Construct an object from the factory.
Arguments:
n (str): The name of associated with the function to call.
d (dict[str, Callable]): A mapping between names and callables.
Returns:
Any: The value returned by the callable.
Raises:
NotImplementedError: If the dictionary does not contain the requested object constructor.
"""
try:
o = d[n](*args, **kwargs)
except KeyError:
raise NotImplementedError(
f"The requested type {n} has not yet been added to the factory."
)
return o
def decoratorfactoryhelper(*, kind: str, d: dict[str, Callable], **kwargs) -> Any:
"""Extract "kind" from the config.
Keyword Arguments:
        kind (str): The kind of object to construct.
        d (dict[str, Callable]): A mapping between names and callables.
Returns:
Any: The value returned by the callable.
"""
return factory(kind, d, **kwargs)
| 1,128 | 26.536585 | 97 |
py
|
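A quick usage sketch of the factory helper above, with a made-up constructor table (the test_factory tests further down exercise the same pattern):

# Sketch: dispatching to a constructor by name via factory().
from experiment import factory

table = {"list": list, "dict": dict}
print(factory("list", table, (1, 2, 3)))   # [1, 2, 3]
# factory("set", table) would raise NotImplementedError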
autoagora-agents
|
autoagora-agents-master/experiment/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import sacred
from experiment.array import applybounds
from experiment.config import experiment
from experiment.factory import decoratorfactoryhelper, factory
experiment_ingredient = sacred.Ingredient("experiment")
| 293 | 25.727273 | 62 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/fixture.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from simulation import environment
@pytest.fixture
def agentconfig():
return {
"kind": "agent",
"count": 7,
"group": "indexer",
"state": {
"kind": "price",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.ones(3),
},
"action": {
"kind": "pricemultiplier",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"shape": (3,),
"baseprice": 2 * np.ones(3),
},
}
@pytest.fixture
def consumerconfig():
return {
"kind": "entity",
"count": 3,
"group": "consumer",
"state": {
"kind": "budget",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.ones(3),
"traffic": np.ones(3),
},
}
@pytest.fixture
def simulationconfig():
nproducts = 1
return {
"ntimesteps": 2,
"nepisodes": 1,
"distributor": {"kind": "softmax", "source": "consumer", "to": "indexer"},
"entities": [
{
"kind": "entity",
"count": 1,
"group": "consumer",
"state": {
"kind": "budget",
"low": 0,
"high": 1,
"initial": 0.5 * np.ones(nproducts),
"traffic": np.ones(nproducts),
},
},
{
"kind": "agent",
"count": 2,
"group": "indexer",
"state": {
"kind": "price",
"low": np.zeros(nproducts),
"high": 3 * np.ones(nproducts),
"initial": np.ones(nproducts),
},
"action": {
"kind": "price",
"low": np.zeros(nproducts),
"high": 3 * np.ones(nproducts),
"shape": (nproducts,),
},
"reward": [
{
"kind": "traffic",
"multiplier": 1,
}
],
"observation": [
{
"kind": "bandit",
}
],
},
],
}
@pytest.fixture
def env(simulationconfig):
return environment(
simulationconfig["distributor"],
simulationconfig["entities"],
simulationconfig["ntimesteps"],
simulationconfig["nepisodes"],
seed=0,
)
@pytest.fixture
def gaussianconfig():
return {
"kind": "gaussian",
"initial_mean": [1.0],
"initial_stddev": [0.5],
"minmean": [0.0],
"maxmean": [2.0],
"minstddev": [0.1],
"maxstddev": [1.0],
}
@pytest.fixture
def degenerateconfig():
return {
"kind": "degenerate",
"initial_value": [1.0],
"minvalue": [0.0],
"maxvalue": [2.0],
}
@pytest.fixture
def scaledgaussianconfig():
return {
"kind": "scaledgaussian",
"initial_mean": [1.0],
"initial_stddev": [1.0],
"minmean": [1.0],
"maxmean": [5.0],
"minstddev": [0.1],
"maxstddev": [1.0],
"scalefactor": [1.0],
}
@pytest.fixture
def predeterminedconfig():
return {
"kind": "predetermined",
"group": "indexer",
"count": 1,
"timestamps": [0, 3, 6],
"vals": [np.zeros(1), np.ones(1), 2 * np.ones(1)],
}
@pytest.fixture
def vpgbanditconfig():
return {
"kind": "vpgbandit",
"group": "indexer",
"count": 1,
"bufferlength": 2,
"actiondistribution": {
"kind": "gaussian",
"initial_mean": [1.0, 1.0, 1.0],
"initial_stddev": [0.1, 0.1, 0.1],
"minmean": [0.0, 0.0, 0.0],
"maxmean": [2.0, 2.0, 2.0],
"minstddev": [0.1, 0.1, 0.1],
"maxstddev": [1.0, 1.0, 1.0],
},
"optimizer": {"kind": "sgd", "lr": 0.001},
}
@pytest.fixture
def ppobanditconfig():
return {
"kind": "ppobandit",
"group": "indexer",
"count": 1,
"bufferlength": 2,
"actiondistribution": {
"kind": "gaussian",
"initial_mean": [1.0, 1.0, 1.0],
"initial_stddev": [0.1, 0.1, 0.1],
"minmean": [0.0, 0.0, 0.0],
"maxmean": [2.0, 2.0, 2.0],
"minstddev": [0.1, 0.1, 0.1],
"maxstddev": [1.0, 1.0, 1.0],
},
"optimizer": {"kind": "sgd", "lr": 0.001},
"ppoiterations": 2,
"epsclip": 0.1,
"entropycoeff": 1e-1,
"pullbackstrength": 1,
"stddevfallback": True,
}
@pytest.fixture
def rmppobanditconfig():
return {
"kind": "rmppobandit",
"group": "indexer",
"count": 1,
"bufferlength": 2,
"actiondistribution": {
"kind": "gaussian",
"initial_mean": [1.0, 1.0, 1.0],
"initial_stddev": [0.1, 0.1, 0.1],
"minmean": [0.0, 0.0, 0.0],
"maxmean": [2.0, 2.0, 2.0],
"minstddev": [0.1, 0.1, 0.1],
"maxstddev": [1.0, 1.0, 1.0],
},
"optimizer": {"kind": "sgd", "lr": 0.001},
"ppoiterations": 2,
"epsclip": 0.1,
"entropycoeff": 1e-1,
"pullbackstrength": 1,
"stddevfallback": True,
}
| 5,654 | 23.911894 | 82 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
| 75 | 24.333333 | 37 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/experiment/test_factory.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import pytest
import experiment
from .helper import add, sub
@pytest.fixture
def d():
return {"add": add, "sub": sub}
def test_factory_executes_correct_function(d):
n = "add"
assert experiment.factory(n, d, 2, b=1, c=2) == 6
def test_factory_raises_notimplementederror(d):
n = "mul"
with pytest.raises(NotImplementedError):
_ = experiment.factory(n, d, 2, b=1, c=2)
def test_decoratorfactoryhelper_executes_correct_function(d):
assert experiment.decoratorfactoryhelper(kind="add", d=d, a=2, b=1, c=2) == 6
| 624 | 20.551724 | 81 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/experiment/test_config.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import pytest
import experiment
def test_importn():
assert experiment.config.importn("foo", "tests/experiment/helper.py") == "foo" # type: ignore
| 230 | 20 | 98 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/experiment/helper.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
foo = "foo"
def add(a, *, b, c):
return a * (b + c)
def sub(a, *, b, c):
return a * (b - c)
| 180 | 12.923077 | 37 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/experiment/test_array.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import experiment
def inbounds(a: np.ndarray, l: float | np.ndarray, h: float | np.ndarray) -> np.bool_:
"""Check if array is in between lower and upper bounds.
Bounds are inclusive.
Arguments:
a (np.ndarray): The array to check
l (float | np.ndarray): The lower bound
h (float | np.ndarray): The upper bound
Returns:
bool: True if array is in bounds, else False.
Raises:
        ValueError: If the length of the bounds doesn't equal the length of the array,
            when the bounds are given as arrays.
"""
return ((a >= l) & (a <= h)).all()
def test_applybounds_float():
a = np.array([0, 2, 4])
l = 1
h = 3
assert inbounds(experiment.applybounds(a, l, h), l, h)
def test_applybounds_array():
a = np.array([1, 2, 3])
l = np.array([2, 3, 4])
h = np.array([2, 3, 4])
assert inbounds(experiment.applybounds(a, l, h), l, h)
def test_applybounds_empty():
a = np.array([])
l = 1
h = 3
assert inbounds(experiment.applybounds(a, l, h), l, h)
def test_applybounds_raises_valueerror():
a = np.array([])
l = np.array([1, 2])
h = 3
with pytest.raises(ValueError):
_ = experiment.applybounds(a, l, h)
| 1,341 | 22.54386 | 87 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/experiment/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
| 75 | 24.333333 | 37 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/test_dynamics.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from simulation.dynamics import *
from simulation.entity.action import *
from simulation.entity.state import *
def test_pricestate_priceaction_dynamics():
state = {
"kind": "price",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.zeros(3),
}
s = statefactory(**state)
action = {
"kind": "price",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"shape": (3,),
"seed": 0,
}
a = actionfactory(**action)
a.value = np.array([1, 2, 3])
dynamics(s, a) # type: ignore
assert (s.value == np.array([1, 2, 3])).all()
def test_pricestate_pricemultiplieraction_dynamics():
state = {
"kind": "price",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.zeros(3),
}
s = statefactory(**state)
action = {
"kind": "pricemultiplier",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"shape": (3,),
"baseprice": 0.1 * np.ones(3),
"seed": 0,
}
a = actionfactory(**action)
a.value = np.array([1, 2, 3])
dynamics(s, a) # type: ignore
assert np.allclose(s.value, np.array([0.1, 0.2, 0.3]))
def test_budgetstate_budgetaction_dynamics():
state = {
"kind": "budget",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.zeros(3),
"traffic": np.ones(3),
}
s = statefactory(**state)
action = {
"kind": "budget",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"shape": (3,),
"seed": 0,
}
a = actionfactory(**action)
a.value = np.array([1, 2, 3])
dynamics(s, a) # type: ignore
assert (s.value == np.array([1, 2, 3])).all()
| 1,846 | 23.959459 | 58 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/test_observation.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from simulation import observation
from simulation.entity.entity import entitygroupfactory
from ..fixture import *
def test_bandit_observation(agentconfig):
agentconfig["observation"] = [
{"kind": "bandit"},
]
entities = {"indexer": entitygroupfactory(**agentconfig)}
obs = observation.observationfactory(observations=agentconfig["observation"])
assert isinstance(obs, observation.BanditObservation)
agent = entities["indexer"][0]
assert np.allclose(obs(agent=agent, entities=entities), np.array([])) # type: ignore
| 652 | 30.095238 | 89 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/test_distributor.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from simulation import distributor
from simulation.entity import entitygroupfactory
from ..fixture import *
def test_softmaxdistributor_softmaxmask_degenerate():
# All values are False in the mask
# Should return zeros
x = np.ones((3, 2))
mask = np.full((3, 2), False)
dist = distributor.distributorfactory(
kind="softmax", source="consumer", to="indexer"
)
y = dist.softmaxmask(x, mask) # type: ignore
assert np.array_equal(y, np.zeros_like(x))
def test_softmaxdistributor_softmaxmask_partial_mask():
x = np.ones((3, 2))
mask = np.full((3, 2), False)
mask[0] = [True, True]
dist = distributor.distributorfactory(
kind="softmax", source="consumer", to="indexer"
)
y = dist.softmaxmask(x, mask) # type: ignore
expected = np.zeros_like(x)
expected[0] = [1.0, 1.0]
assert np.array_equal(y, expected)
def test_softmaxdistributor_softmaxmask_no_mask():
x = np.ones((2, 2))
mask = np.full((2, 2), True)
dist = distributor.distributorfactory(
kind="softmax", source="consumer", to="indexer"
)
y = dist.softmaxmask(x, mask) # type: ignore
expected = 0.5 * np.ones_like(x)
assert np.array_equal(y, expected)
def test_softmaxdistributor_softmaxmask_different_masks_per_column():
x = np.ones((2, 2))
mask = np.full((2, 2), True)
mask[1, 1] = False
dist = distributor.distributorfactory(
kind="softmax", source="consumer", to="indexer"
)
y = dist.softmaxmask(x, mask) # type: ignore
expected = 0.5 * np.ones_like(x)
expected[:, 1] = [1.0, 0.0]
assert np.array_equal(y, expected)
def test_softmaxdistributor_one_indexer_all_traffic(agentconfig, consumerconfig):
# Set up agents
agentconfig["count"] = 2
nproducts = 3
indexers = entitygroupfactory(**agentconfig)
indexers[0].state.value = np.zeros(nproducts) # One agent's price is zero
indexers[1].state.value = 5 * np.ones(nproducts) # Other agent's price > budget
consumers = entitygroupfactory(**consumerconfig)
entities = {"consumer": consumers, "indexer": indexers}
dist = distributor.distributorfactory(
kind="softmax", source="consumer", to="indexer"
)
dist(entities=entities)
assert sum(indexers[0].state.traffic) == 9
assert sum(indexers[1].state.traffic) == 0
def test_softmaxdistributor_all_indexers_over_budget(agentconfig, consumerconfig):
# Set up agents
agentconfig["count"] = 2
nproducts = 3
indexers = entitygroupfactory(**agentconfig)
# Both agents over budget
indexers[0].state.value = 5 * np.ones(nproducts)
indexers[1].state.value = 5 * np.ones(nproducts)
consumers = entitygroupfactory(**consumerconfig)
entities = {"consumer": consumers, "indexer": indexers}
dist = distributor.distributorfactory(
kind="softmax", source="consumer", to="indexer"
)
dist(entities=entities)
assert sum(indexers[0].state.traffic) == 0
assert sum(indexers[1].state.traffic) == 0
| 3,121 | 32.212766 | 84 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
| 75 | 24.333333 | 37 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/test_environment.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from simulation.distributor import SoftmaxDistributor
from ..fixture import *
def test_environment_construction(env):
# Two groups created
assert len(env.groups) == 2
assert isinstance(env.distributor, SoftmaxDistributor)
def test_environment_reset(env):
# Change the state of an indexer
env.groups["indexer"][0].state.value = np.array([3])
env.t = 100
_ = env.reset()
assert env.groups["indexer"][0].state.value == np.ones(1)
assert env.t == 0
def test_environment_agents(env):
agents = tuple(env.agents.keys())
assert agents == ("indexer",)
def test_environment_entities(env):
agents = tuple(env.entities.keys())
assert agents == ("consumer",)
def test_environment_agentslist(env):
agents = env.agentslist
assert len(agents) == 2
assert agents[0].group == "indexer"
def test_environment_observation(env):
assert env.observation["indexer_0"].size == 0
assert env.observation["indexer_1"].size == 0
def test_environment_reward(env):
assert env.reward["indexer_0"] == 0.0
assert env.reward["indexer_1"] == 0.0
env.groups["indexer"][0].state.traffic = np.array([2])
assert env.reward["indexer_0"] == 2.0
assert env.reward["indexer_1"] == 0.0
def test_environment_isfinished(env):
assert not env.isfinished()
env.t = 10
assert env.isfinished()
def test_environment_done(env):
assert not env.done["indexer_0"]
assert not env.done["indexer_1"]
env.t = 10
assert env.done["indexer_0"]
assert env.done["indexer_1"]
def test_environment_render(env):
with pytest.raises(NotImplementedError):
env.render()
def test_environment_close(env):
with pytest.raises(NotImplementedError):
env.close()
def test_environment_step(env):
# Change the price of an indexer
obs, act, rew, done = env.step(
actions={"indexer_0": np.array([0.25]), "indexer_1": np.array([3])}
)
assert env.groups["indexer"][0].state.value == np.array([0.25])
assert rew["indexer_0"] == 0.25
assert rew["indexer_1"] == 0.0
| 2,191 | 23.909091 | 75 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/test_reward.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from simulation import reward
from simulation.entity.entity import entitygroupfactory
from ..fixture import *
def test_traffic_reward(agentconfig):
agentconfig["reward"] = [
{"kind": "traffic", "multiplier": -1},
{"kind": "traffic", "multiplier": 2},
]
entities = {"indexer": entitygroupfactory(**agentconfig)}
rew = reward.rewardfactory(rewards=agentconfig["reward"])
assert isinstance(rew, reward.TrafficReward)
traffic = np.random.rand(3)
agent = entities["indexer"][0]
agent.state.traffic = traffic
    # Multipliers cancel out (-1 + 2 = 1)
assert rew(agent=agent, entities=entities) == sum(traffic) # type: ignore
def test_sumregretratio_reward(agentconfig, consumerconfig):
agentconfig["reward"] = [
{"kind": "sumregretratio", "multiplier": 1, "fromgroup": "consumer"},
]
entities = {
"indexer": entitygroupfactory(**agentconfig),
"consumer": entitygroupfactory(**consumerconfig),
}
rew = reward.rewardfactory(rewards=agentconfig["reward"])
assert isinstance(rew, reward.SumRegretRatio)
traffic = np.random.rand(3)
agent = entities["indexer"][0]
agent.state.traffic = traffic
assert rew(agent=agent, entities=entities) == sum(traffic) / 9 # type: ignore
| 1,378 | 32.634146 | 82 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/entity/test_action.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from simulation.entity import action
@pytest.fixture
def a():
return action.Action(low=0, high=3, shape=(3,), seed=0)
def test_action_init(a):
assert (a.space.low == np.zeros(3)).all()
assert (a.space.high == 3 * np.ones(3)).all()
def test_action_update(a):
a.value = np.array([3, 3, 3])
assert (a.value == np.array([3, 3, 3])).all()
def test_priceaction_factory():
config = {
"kind": "price",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"shape": (3,),
"seed": 0,
}
a = action.actionfactory(**config)
assert isinstance(a, action.PriceAction)
def test_pricemultiplieraction_factory():
config = {
"kind": "pricemultiplier",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"shape": (3,),
"baseprice": 2 * np.ones(3),
"seed": 0,
}
a = action.actionfactory(**config)
assert isinstance(a, action.PriceMultiplierAction)
def test_budgetaction_factory():
config = {
"kind": "budget",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"shape": (3,),
"seed": 0,
}
s = action.actionfactory(**config)
assert isinstance(s, action.BudgetAction)
| 1,338 | 21.316667 | 59 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/entity/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
| 75 | 24.333333 | 37 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/entity/test_state.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from simulation.entity import state
@pytest.fixture
def s():
return state.State(low=0, high=3, initial=np.array([1, 2, 3]))
def test_state_init():
a = np.array([1, 2, 3])
s = state.State(low=0, high=3, initial=a)
assert (s.space.low == np.zeros(3)).all()
assert (s.space.high == 3 * np.ones(3)).all()
assert (s.value == a).all()
def test_state_initial_oob():
a = np.array([-1, -1, -1])
s = state.State(low=0, high=3, initial=a)
assert (s.value == np.array([0, 0, 0])).all()
def test_state_update(s):
s.value = np.array([3, 3, 3])
assert (s.value == np.array([3, 3, 3])).all()
def test_state_reset(s):
    s.value = np.array([3, 3, 3])
s.reset()
assert (s.value == np.array([1, 2, 3])).all()
def test_pricestate_factory():
config = {
"kind": "price",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.zeros(3),
}
s = state.statefactory(**config)
assert isinstance(s, state.PriceState)
def test_pricestate_fee():
config = {
"kind": "price",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.zeros(3),
}
s = state.statefactory(**config)
prices = np.random.rand(3)
s.value = prices
s.traffic = np.ones(3)
assert (s.fee == sum(prices)).all() # type: ignore
def test_budgetstate_factory():
config = {
"kind": "budget",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.zeros(3),
"traffic": np.ones(3),
}
s = state.statefactory(**config)
assert isinstance(s, state.BudgetState)
| 1,738 | 22.186667 | 66 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/simulation/entity/test_entity.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
from simulation.entity import Agent, Entity, entity
from simulation.entity.action import *
from simulation.entity.state import *
@pytest.fixture
def entityconfig():
return {
"kind": "entity",
"count": 5,
"group": "consumer",
"state": {
"kind": "budget",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.zeros(3),
"traffic": np.ones(3),
},
}
@pytest.fixture
def agentconfig():
return {
"kind": "agent",
"count": 7,
"group": "indexer",
"state": {
"kind": "price",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"initial": np.zeros(3),
},
"action": {
"kind": "pricemultiplier",
"low": np.zeros(3),
"high": 3 * np.ones(3),
"shape": (3,),
"baseprice": 2 * np.ones(3),
},
}
def test_entity_init(entityconfig):
es = entity.entitygroupfactory(**entityconfig)
assert isinstance(es[0], Entity)
assert len(es) == 5
assert isinstance(es[0].state, BudgetState)
def test_agent_init(agentconfig):
ags = entity.entitygroupfactory(**agentconfig)
assert isinstance(ags[0], Agent)
assert len(ags) == 7
assert isinstance(ags[0].state, PriceState)
assert isinstance(ags[0].action, PriceMultiplierAction)
| 1,523 | 23.190476 | 59 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/autoagora_agents/test_algorithm.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import pytest
import torch
from autoagora_agents import algorithm
from ..fixture import *
def test_predetermined(predeterminedconfig):
agent = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
obs = np.zeros(1)
rew = 1
act = np.zeros(1)
for i in range(10):
act = agent(observation=obs, action=act, reward=rew, done=False)
if i < 3:
assert np.array_equiv(np.zeros(1), act)
elif i >= 6:
assert np.array_equiv(2 * np.ones(1), act)
else:
assert np.array_equiv(np.ones(1), act)
assert agent.niterations == 10
agent.reset()
assert agent.niterations == 0
def test_predetermined_nonzero_first_timestamp(predeterminedconfig):
predeterminedconfig["timestamps"] = [5, 10, 15]
with pytest.raises(ValueError):
_ = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
def test_predetermined_different_length_lists(predeterminedconfig):
predeterminedconfig["timestamps"] = [0, 10]
with pytest.raises(ValueError):
_ = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
def test_advantage_reward_std_nan(predeterminedconfig):
# The config here doesn't matter. We just need to set up some agent to get access to the advantage static method
agent = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
rewards = torch.as_tensor([1.0])
adv = agent.advantage(rewards)
assert adv == rewards.unsqueeze(dim=1)
def test_advantage_reward_std_zero(predeterminedconfig):
# The config here doesn't matter. We just need to set up some agent to get access to the advantage static method
agent = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
rewards = torch.as_tensor([1.0, 1.0])
adv = agent.advantage(rewards)
assert all(adv == rewards.unsqueeze(dim=1))
def test_advantage_reward_std_nonzero(predeterminedconfig):
# The config here doesn't matter. We just need to set up some agent to get access to the advantage static method
agent = algorithm.algorithmgroupfactory(**predeterminedconfig)[0]
for _ in range(100):
rewards = torch.randint(-100, 100, (10,), dtype=torch.float32)
adv = agent.advantage(rewards)
        # Our definition of advantage here is essentially just standardising a gaussian
assert torch.allclose(adv.mean(), torch.zeros(1), atol=1e-2)
assert torch.allclose(adv.std(), torch.ones(1), atol=1e-2)
def test_bandit_call(vpgbanditconfig):
agent = algorithm.algorithmgroupfactory(**vpgbanditconfig)[0]
obs = np.zeros(1)
act = np.zeros(1)
rew = 1
done = False
act = agent(observation=obs, action=act, reward=rew, done=done)
assert len(agent.buffer) == 1 # type: ignore
act = agent(observation=obs, action=act, reward=rew, done=done)
assert len(agent.buffer) == 2 # type: ignore
# Buffer is a deque, so shouldn't fill more
act = agent(observation=obs, action=act, reward=rew, done=done)
assert len(agent.buffer) == 2 # type: ignore
| 3,131 | 36.285714 | 116 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/autoagora_agents/test_distribution.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from autoagora_agents import distribution
from ..fixture import *
def test_gaussiandistribution_reset(gaussianconfig):
dist = distribution.distributionfactory(**gaussianconfig)
v = dist.mean # type: ignore
dist._mean = torch.tensor([2.0]) # type: ignore
assert not torch.allclose(v, dist.mean) # type: ignore
dist.reset()
assert torch.allclose(v, dist.mean) # type: ignore
def test_gaussiandistribution_clamping(gaussianconfig):
dist = distribution.distributionfactory(**gaussianconfig)
dist._mean = torch.tensor([5.0]) # type: ignore
assert torch.allclose(dist.mean, torch.tensor([2.0])) # type: ignore
dist._logstddev = torch.tensor([5.0]) # type: ignore
assert torch.allclose(dist.stddev, torch.tensor([1.0])) # type: ignore
def test_gaussiandistribution_sample(gaussianconfig):
dist = distribution.distributionfactory(**gaussianconfig)
samples = torch.tensor([dist.sample() for _ in range(1000)])
assert torch.allclose(torch.std(samples), torch.tensor(0.5), atol=1e-1)
assert torch.allclose(torch.mean(samples), torch.tensor(1.0), atol=1e-1)
def test_degeneratedistribution_reset(degenerateconfig):
dist = distribution.distributionfactory(**degenerateconfig)
v = dist.mean # type: ignore
dist._mean = torch.tensor([2.0]) # type: ignore
dist.reset()
assert torch.allclose(v, dist.mean) # type: ignore
def test_degeneratedistribution_clamping(degenerateconfig):
dist = distribution.distributionfactory(**degenerateconfig)
dist._value = torch.tensor([5.0]) # type: ignore
assert torch.allclose(dist.mean, torch.tensor([2.0])) # type: ignore
def test_degeneratedistribution_sample(degenerateconfig):
dist = distribution.distributionfactory(**degenerateconfig)
samples = torch.tensor([dist.sample() for _ in range(10)])
assert torch.sum(samples) == 10
def test_degeneratedistribution_entropy(degenerateconfig):
dist = distribution.distributionfactory(**degenerateconfig)
assert torch.sum(dist.entropy()) == 0
def test_scaledgaussiandistribution_reset(scaledgaussianconfig):
dist = distribution.distributionfactory(**scaledgaussianconfig)
v = dist.mean # type: ignore
dist._mean = torch.tensor([2.0]) # type: ignore
assert not torch.allclose(v, dist.mean) # type: ignore
dist.reset()
assert torch.allclose(v, dist.mean) # type: ignore
def test_scaledgaussiandistribution_clamping(scaledgaussianconfig):
dist = distribution.distributionfactory(**scaledgaussianconfig)
dist._mean = torch.tensor([-1.0]) # type: ignore
assert torch.allclose(dist.mean, torch.tensor([0.0])) # type: ignore
dist._logstddev = torch.tensor([-100.0]) # type: ignore
assert torch.allclose(dist.stddev, torch.tensor([0.1])) # type: ignore
def test_scaledgaussiandistribution_unscaledsample(scaledgaussianconfig):
dist = distribution.distributionfactory(**scaledgaussianconfig)
samples = torch.tensor([dist.unscaledsample() for _ in range(1000)]) # type: ignore
assert torch.allclose(torch.std(samples), torch.tensor(1.0), atol=1e-1)
assert torch.allclose(torch.mean(samples), torch.tensor(0.0), atol=1e-1)
def test_scaledgaussiandistribution_scale(scaledgaussianconfig):
dist = distribution.distributionfactory(**scaledgaussianconfig)
torch.allclose(dist.scale(torch.tensor([0.0])), torch.tensor([1.0])) # type: ignore
| 3,506 | 39.310345 | 88 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/autoagora_agents/test_buffer.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import torch
from autoagora_agents import buffer
def test_buffer():
maxlen = 10
b = buffer.buffer(maxlength=maxlen)
sample = {
"reward": torch.as_tensor([1, 2, 3]),
"action": torch.as_tensor([3, 2, 1]),
}
assert len(b) == 0
b.append(sample) # type: ignore
assert len(b) == 1
for _ in range(maxlen + 1):
b.append(sample) # type: ignore
assert buffer.isfull(b)
b.clear()
assert not buffer.isfull(b)
| 549 | 19.37037 | 45 |
py
|
autoagora-agents
|
autoagora-agents-master/tests/autoagora_agents/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
| 75 | 24.333333 | 37 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/environment.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from typing import Any
import gymnasium
import numpy as np
from simulation.distributor import distributorfactory
from simulation.dynamics import dynamics
from simulation.entity import Agent, Entity, entitygroupfactory
from simulation.observation import observationfactory
from simulation.reward import rewardfactory
class Environment(gymnasium.Env):
"""The AutoAgora Environment.
Keyword Arguments:
distributor (dict[str, Any]): The config for the distributor.
entities (list[dict[str, Any]]): The configs for each group of entities.
Attributes:
groups (dict[str, list[Entity]]): A mapping from group names to the entities in
that group.
nepisodes (int): How many episodes to run.
ntimesteps (int): How many timesteps to run each episode for.
t (int): The current timestep.
_rewards (dict[str, Reward]): A mapping from group names to the reward function
of entities in that group.
        _observations (dict[str, Observation]): A mapping from group names to that group's
observation function.
seed (int): The random seed.
"""
def __init__(
self,
*,
distributor: dict[str, Any],
entities: list[dict[str, Any]],
ntimesteps: int,
nepisodes: int,
seed: int,
) -> None:
super().__init__()
# Create entities
self.groups = {e["group"]: entitygroupfactory(**e) for e in entities}
self.nepisodes = nepisodes
self.ntimesteps = ntimesteps
self.t = 0
self.seed = seed
self._rewards = {
e["group"]: rewardfactory(rewards=e["reward"])
for e in entities
if e["kind"] == "agent"
}
self._observations = {
e["group"]: observationfactory(observations=e["observation"])
for e in entities
if e["kind"] == "agent"
}
self.distributor = distributorfactory(**distributor)
def reset(
self,
) -> tuple[
dict[str, np.ndarray], dict[str, np.ndarray], dict[str, float], dict[str, bool]
]:
"""Reset the environment.
Returns:
observation (dict[str, np.ndarray]): The observations of the agents.
Each entry in the dictionary maps an agent to its observation.
            action (dict[str, np.ndarray]): The actions of the agents.
                Each entry in the dictionary maps an agent to its action.
            reward (dict[str, float]): The rewards of the agents. Each entry in the
dictionary maps an agent to its reward.
done (dict[str, bool]): False if an agent is not done. True if it is. Each
entry in the dictionary maps an agent to its done state.
"""
self.t = 0
for group in self.groups.values():
for entity in group:
entity.reset()
# No reward for first timestep
reward = {a.name: 0.0 for a in self.agentslist}
return self.observation, self.action, reward, self.done
def step(
self, *, actions: dict[str, np.ndarray]
) -> tuple[
dict[str, np.ndarray], dict[str, np.ndarray], dict[str, float], dict[str, bool]
]:
"""Step the environment forward given a set of actions.
Keyword Arguments:
actions (dict[str, list[np.ndarray]]): The action of each agent.
The mapping is between group names and lists of actions.
Returns:
observation (dict[str, np.ndarray]): The observations of the agents.
Each entry in the dictionary maps an agent to its observation.
action (dict[str, np.ndarray]): The actions of the agents.
Each entry in the dictionary maps an agent to its action.
reward (dict[str, float]): The rewards of the agents. Each entry in the
dictionary maps an agent to its reward.
done (dict[str, bool]): False if an agent is not done. True if it is. Each
entry in the dictionary maps an agent to its done state.
"""
self.t += 1
# Update agent actions
for agent in self.agentslist:
agent.action.value = actions[agent.name]
# Update states
for agent in self.agentslist:
dynamics(agent.state, agent.action) # type: ignore
self.distributor(entities=self.groups)
return self.observation, self.action, self.reward, self.done
def render(self):
"""Rendering is not part of the simulation framework."""
raise NotImplementedError("Rendering is handled by a separate library.")
def close(self):
"""Closing is not part of the simulation framework."""
raise NotImplementedError
@property
def agents(self) -> dict[str, list[Agent]]:
"""The agents in the environment."""
return {k: v for (k, v) in self.groups.items() if type(v[0]) == Agent} # type: ignore
@property
def agentslist(self) -> list[Agent]:
"""The agents in the environment as a list."""
ags = []
for group in self.agents.values():
ags.extend(group)
return ags
@property
def entities(self) -> dict[str, list[Entity]]:
"""The entities in the environment."""
return {k: v for (k, v) in self.groups.items() if type(v[0]) == Entity} # type: ignore
@property
def observation(self) -> dict[str, np.ndarray]:
"""The observations of all agents in the environment."""
d = {}
for (group, ags) in self.agents.items():
obsfn = self._observations[group]
for a in ags:
d[a.name] = obsfn(agent=a, entities=self.groups)
return d
@property
def reward(self) -> dict[str, float]:
"""The rewards of all agents in the environment."""
d = {}
for (group, ags) in self.agents.items():
rewfn = self._rewards[group]
for a in ags:
d[a.name] = rewfn(agent=a, entities=self.groups)
return d
@property
def done(self) -> dict[str, bool]:
"""Whether each agent is done.
In our case, agents are only done if the episode is finished.
"""
d = {}
for a in self.agentslist:
d[a.name] = self.isfinished()
return d
@property
def action(self) -> dict[str, np.ndarray]:
"""Each agent's action."""
d = {}
for a in self.agentslist:
d[a.name] = a.action.value
return d
def isfinished(self) -> bool:
"""True if t >= ntimesteps. Else false."""
return self.t >= self.ntimesteps
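# Illustrative sketch (not part of the original module) of the intended control flow.
# The distributor/entities configs are elided here because their full schema is supplied
# by the experiment configuration rather than by this file; `controller` stands for the
# agent controller from autoagora_agents:
#
#   env = Environment(distributor=..., entities=..., ntimesteps=100, nepisodes=1, seed=0)
#   obs, act, rew, done = env.reset()
#   while not env.isfinished():
#       acts = controller(observations=obs, actions=act, rewards=rew, dones=done)
#       obs, act, rew, done = env.step(actions=acts)
#       controller.update()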
| 6,681 | 33.984293 | 95 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/distributor.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from abc import ABC, abstractmethod
import numpy as np
import experiment
from simulation.entity import Entity
class Distributor(ABC):
"""The indexer selection algorithm base class.
Attributes:
source (str): The group from which the query comes. E.g., "consumer"
to (str): The group to which the query goes. E.g., "indexer"
"""
def __init__(self, *, source: str, to: str) -> None:
super().__init__()
self.source = source
self.to = to
@abstractmethod
def __call__(self, *, entities: dict[str, list[Entity]]) -> None:
"""Choose how to allocate traffic from `source` to `to`.
Keyword Arguments:
entities (dict[str, list[Entity]]): A mapping from group names to entities
in said group.
"""
pass
class SoftmaxDistributor(Distributor):
"""Allocates traffic via a softmax function.
However, if an indexer's price exceeds a consumer's budget, the indexer gets 0 traffic.
Attributes:
minprice (float): A large, negative price so as to ensure an indexer doesn't receive
traffic.
"""
def __init__(self, *, source: str, to: str) -> None:
super().__init__(source=source, to=to)
self.minprice = -1e20
@staticmethod
def softmaxmask(x: np.ndarray, mask: np.ndarray) -> np.ndarray:
"""Compute the columnwise softmax for elements that are True in the mask.
Arguments:
x (np.ndarray): The array for which to compute the softmax.
mask (np.ndarray): The mask array. The function works for True values.
Returns:
np.ndarray: An array in which the indices that are True in the mask are
columnwise softmaxed, and the indices that are False are zeroed.
"""
x = np.atleast_2d(x)
mask = np.atleast_2d(mask)
y = np.zeros_like(x)
# Iterate over columns
for j in range(x.shape[1]):
# Get masked column
xmask = x[:, j][mask[:, j]]
if xmask.size <= 0:
continue
# Subtract max for numerical stability as np.exp(inf) is inf
# but np.exp(-inf) is 0
ex = np.exp(xmask - np.max(xmask))
# Set value into masked indices
y[:, j][mask[:, j]] = ex / np.sum(ex)
return y
def __call__(self, *, entities: dict[str, list[Entity]]) -> None:
source = entities[self.source]
to = entities[self.to]
prices = np.atleast_2d(np.vstack([t.state.value for t in to]))
traffics = np.zeros_like(prices)
for s in source:
budget = s.state.value
# If above budget, don't get any traffic
mask = prices <= budget
# Compute how much traffic goes to each agent below the budget
percenttraffic = self.softmaxmask(prices, mask)
# Accumulate traffic values per agent
traffics += np.multiply(percenttraffic, s.state.traffic)
for traffic, t in zip(traffics, to):
t.state.traffic = traffic
def distributorfactory(*, kind: str, source: str, to: str, **kwargs) -> Distributor:
"""Instantiate a new Distributor.
Keyword Arguments:
kind (str): The type of Distributor to instantiate.
"softmax" -> SoftmaxDistributor
source (str): The group from which the query comes. E.g., "consumer"
to (str): The group to which the query goes. E.g., "indexer"
Returns:
Distributor: An instantiated Distributor.
"""
distributors = {"softmax": SoftmaxDistributor}
return experiment.factory(kind, distributors, source=source, to=to, **kwargs)
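# Illustrative sketch (not part of the original module): columnwise masked softmax.
# Prices above a consumer's budget are masked out and receive zero traffic share.
# The prices and the budget of 3.0 below are made-up values.
if __name__ == "__main__":
    prices = np.array([[1.0], [2.0], [5.0]])  # three indexers, one query type
    mask = prices <= 3.0  # hypothetical consumer budget of 3.0
    shares = SoftmaxDistributor.softmaxmask(prices, mask)
    # shares[:, 0] sums to 1 over the unmasked rows; the 5.0 row stays at 0.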
| 3,794 | 33.5 | 92 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/reward.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import experiment
from simulation.entity import Agent, Entity
class Reward:
"""The Reward base class."""
def __init__(self) -> None:
pass
def __call__(self, *, agent: Agent, entities: dict[str, list[Entity]]) -> float:
"""Compute the reward.
Keyword Arguments:
agent (Agent): The agent for whom to compute the reward.
entities (dict[str, list[Entity]]): The entities in the environment.
Returns:
float: The agent's reward.
"""
return 0.0
class RewardDecorator(Reward):
"""The base class for reward decorators.
Attributes:
reward (Reward): An instance of :class:`Reward`
multiplier (float): The reward is scaled by this value.
Make this negative to make the reward into a penalty.
"""
def __init__(self, *, reward: Reward, multiplier: float) -> None:
super().__init__()
self._reward = reward
self.multiplier = multiplier
@property
def reward(self) -> Reward:
return self._reward
def __call__(self, *, agent: Agent, entities: dict[str, list[Entity]]) -> float:
return self._reward(agent=agent, entities=entities) * self.multiplier
class TrafficReward(RewardDecorator):
"""A reward based on how much traffic the agent sends/receives."""
def __init__(self, *, reward: Reward, multiplier: float) -> None:
super().__init__(reward=reward, multiplier=multiplier)
def __call__(self, *, agent: Agent, entities: dict[str, list[Entity]]) -> float:
return agent.state.fee * self.multiplier + self._reward( # type: ignore
agent=agent, entities=entities
)
class SumRegretRatio(RewardDecorator):
"""A reward based on the fees earned over the total possible fees.
Attributes:
fromgroup (str): The group name of the entities paying for queries. Probably
"consumer" or something similar.
"""
def __init__(self, *, reward: Reward, multiplier: float, fromgroup: str) -> None:
super().__init__(reward=reward, multiplier=multiplier)
self.fromgroup = fromgroup
def __call__(self, *, agent: Agent, entities: dict[str, list[Entity]]) -> float:
consumers = entities[self.fromgroup]
# How much the agent could have earned. Each consumer's budget * the number of queries they sent
denom = np.sum([np.multiply(c.state.value, c.state.traffic) for c in consumers])
val = (agent.state.fee / denom) * self.multiplier # type: ignore
return val + self._reward(agent=agent, entities=entities)
def rewardfactory(*, rewards: list[dict]) -> Reward:
"""Instantiate a reward object.
Keyword Arguments:
rewards (list[dict]): A list of the configs for each reward that make up the
aggregate reward. Each config must contain the "kind" keyword, wherein
"kind" can be:
"traffic"
"sumregretratio"
Returns:
Reward: The reward object
"""
rdict = {"traffic": TrafficReward, "sumregretratio": SumRegretRatio}
r = Reward()
for config in rewards:
config["reward"] = r
r = experiment.decoratorfactoryhelper(d=rdict, **config) # type: ignore
return r
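# Illustrative sketch (not part of the original module): rewards compose via the decorator
# pattern, so the list below yields fee * 1.0 - 0.1 * (fee / total possible fees).
# The multipliers and the "consumer" group name are made up for the example.
if __name__ == "__main__":
    r = rewardfactory(
        rewards=[
            {"kind": "traffic", "multiplier": 1.0},
            {"kind": "sumregretratio", "multiplier": -0.1, "fromgroup": "consumer"},
        ]
    )
    # r(agent=indexer_agent, entities=groups) would evaluate both weighted terms and sum them.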
| 3,377 | 31.796117 | 104 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/observation.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import experiment
from simulation.entity import Agent, Entity
class Observation:
"""The Observation base class."""
def __init__(self) -> None:
pass
def __call__(
self, *, agent: Agent, entities: dict[str, list[Entity]]
) -> np.ndarray:
"""Generate the agent's observation.
Keyword Arguments:
agent (Agent): The agent for whom to generate the observation.
entities (dict[str, list[Entity]]): The entities in the environment.
Returns:
np.ndarray: The agent's observation.
"""
return np.array([])
class ObservationDecorator(Observation):
"""The base class for observation decorators.
Attributes:
observation (Observation): An instance of :class:`Observation`
"""
def __init__(self, *, observation: Observation) -> None:
super().__init__()
self._observation = observation
@property
def observation(self) -> Observation:
return self._observation
def __call__(
self, *, agent: Agent, entities: dict[str, list[Entity]]
) -> np.ndarray:
return self._observation(agent=agent, entities=entities)
class BanditObservation(ObservationDecorator):
"""An empty observation for bandits."""
def __init__(self, *, observation: Observation) -> None:
super().__init__(observation=observation)
def __call__(
self, *, agent: Agent, entities: dict[str, list[Entity]]
) -> np.ndarray:
return np.append(
np.array([]), self._observation(agent=agent, entities=entities)
)
def observationfactory(*, observations: list[dict]) -> Observation:
"""Instantiate an observation object.
Keyword Arguments:
observations (list[dict]): A list of the configs for each observation component
that makes up the full observation. Each config must contain the "kind"
keyword, wherein "kind" can be:
"bandit"
Returns:
Observation: The observation object
"""
rdict = {"bandit": BanditObservation}
o = Observation()
for config in observations:
config["observation"] = o
o = experiment.decoratorfactoryhelper(d=rdict, **config) # type: ignore
return o
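# Illustrative sketch (not part of the original module): a bandit observation is empty,
# so composing it still yields a zero-length array.
if __name__ == "__main__":
    obs = observationfactory(observations=[{"kind": "bandit"}])
    # A bandit observation ignores the agent and entities it is given.
    assert obs(agent=None, entities={}).size == 0  # type: ignore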
| 2,372 | 26.917647 | 87 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from typing import Any
import numpy as np
import sacred
from simulation.environment import Environment
simulation_ingredient = sacred.Ingredient("simulation")
@simulation_ingredient.capture
def environment(
distributor: dict[str, Any],
entities: list[dict[str, Any]],
ntimesteps: int,
nepisodes: int,
seed: int,
) -> Environment:
"""Construct an environment from the simulation config.
Arguments:
distributor (dict[str, Any]): The config for the query distributor.
entities (list[dict[str, Any]]): The configs for each group of entities.
nepisodes (int): How many episodes to run.
ntimesteps (int): How many timesteps to run each episode for.
seed (int): The random seed.
Returns:
Environment: An instantiated simulation environment.
"""
return Environment(
distributor=distributor,
entities=entities,
ntimesteps=ntimesteps,
nepisodes=nepisodes,
seed=seed,
)
| 1,074 | 25.219512 | 80 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/dynamics.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from multipledispatch import dispatch
from simulation.entity.action import *
from simulation.entity.state import *
@dispatch(PriceState, PriceAction)
def dynamics(s: PriceState, a: PriceAction) -> None: # type: ignore
"""Update the state given the action.
In this case, the new state is just the new action.
Arguments:
s (PriceState): The previous state
a (PriceAction): The current action
"""
s.value = a.value
@dispatch(PriceState, PriceMultiplierAction)
def dynamics(s: PriceState, a: PriceMultiplierAction) -> None: # type: ignore
"""Update the state given the action.
In this case, the new state is the price multipliers times the base price.
Arguments:
s (PriceState): The previous state
        a (PriceMultiplierAction): The current action
"""
s.value = a.value * a.baseprice
@dispatch(BudgetState, BudgetAction)
def dynamics(s: BudgetState, a: BudgetAction) -> None: # type: ignore
"""Update the state given the action.
In this case, the new state is just the new action.
Arguments:
        s (BudgetState): The previous state
        a (BudgetAction): The current action
"""
s.value = a.value
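# Illustrative sketch (not part of the original module): multipledispatch selects the
# overload from the runtime (state, action) types. The bounds and seed below are made up.
if __name__ == "__main__":
    import numpy as np
    s = PriceState(low=0.0, high=10.0, initial=np.array([1.0]))
    a = PriceAction(low=0.0, high=10.0, shape=(1,), seed=0)
    dynamics(s, a)  # dispatches to the (PriceState, PriceAction) overload above
    assert np.allclose(s.value, a.value)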
| 1,267 | 25.978723 | 78 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/entity/state.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import gymnasium
import numpy as np
from numpy.typing import NDArray
import experiment
class State:
"""State of an entity.
Attributes:
low (float | NDArray): The lower bound of the state space.
high (float | NDArray): The upper bound of the state space
initial (NDArray): The initial value of the state
value (NDArray): The state of the entity
traffic (NDArray): Either how many queries the agent received in the last timestep
or how many it sent out. Each element of the array is a different query type.
"""
def __init__(
self, *, low: float | NDArray, high: float | NDArray, initial: NDArray
) -> None:
self.space = gymnasium.spaces.Box(low, high, shape=np.shape(initial))
self.initial = initial
self._state = np.zeros_like(initial)
self.value = initial
self.traffic = np.zeros_like(initial)
def reset(self) -> None:
"""Reset the state."""
self.value = self.initial
self.traffic = np.zeros_like(self.initial)
@property
def value(self) -> NDArray:
return self._state
@value.setter
def value(self, v: NDArray) -> None:
v = experiment.applybounds(v, self.space.low, self.space.high) # type: ignore
self._state = v
class PriceState(State):
"""The price of each query type.
Use "price" as the "kind" of state in the config.
"""
def __init__(self, *, low: float, high: float, initial: NDArray) -> None:
super().__init__(low=low, high=high, initial=initial)
@property
def fee(self) -> float:
"""The query fees earned by the agent."""
return sum(np.multiply(self.value, self.traffic))
class BudgetState(State):
"""The budget across all queries.
The default budget in the studio is 0.003.
Use "budget" as the "kind" of state in the config.
Attributes:
initialtraffic (NDArray): The initial value of the traffic vector.
"""
def __init__(
self, *, low: float, high: float, initial: NDArray, traffic: NDArray
) -> None:
super().__init__(low=low, high=high, initial=initial)
self.initialtraffic = traffic
self.traffic = traffic
def reset(self) -> None:
"""Reset the state."""
self.value = self.initial
self.traffic = self.initialtraffic
def statefactory(
*, kind: str, low: float, high: float, initial: NDArray, **kwargs
) -> State:
"""Instantiate a new state.
Keyword Arguments:
kind (str): The type of state to instantiate.
"price" -> PriceState
"budget" -> BudgetState
low (float): The lower bound of the state space
high (float): The upper bound of the state space
initial (NDArray): The initial value of the state.
Returns:
State: An instantiated state.
"""
states = {"price": PriceState, "budget": BudgetState}
return experiment.factory(
kind, states, low=low, high=high, initial=initial, **kwargs
)
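# Illustrative sketch (not part of the original module): building a price state via the
# factory and reading the earned fee. The bounds, price, and traffic values are made up.
if __name__ == "__main__":
    price = statefactory(kind="price", low=0.0, high=2.0, initial=np.array([1.0]))
    price.traffic = np.array([3.0])  # three queries served at a price of 1.0 each
    assert price.fee == 3.0  # type: ignore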
| 3,132 | 28.556604 | 90 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/entity/action.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import gymnasium
import numpy as np
import experiment
class Action:
"""Action of an entity.
Keyword Arguments:
low (float | np.ndarray): The lower bound of the action space
high (float | np.ndarray): The upper bound of the action space
shape (tuple[int, ...]): The shape of the action space
seed (int): The seed of the random action generator.
Attributes:
space (gymnasium.spaces.Box): The action space.
"""
def __init__(
self,
*,
low: float | np.ndarray,
high: float | np.ndarray,
shape: tuple[int, ...],
seed: int
) -> None:
self.space = gymnasium.spaces.Box(low, high, shape=shape, seed=seed)
self._action = self.space.sample()
@property
def value(self) -> np.ndarray:
"""np.ndarray: The action of the entity."""
return self._action
@value.setter
def value(self, v: np.ndarray) -> None:
v = experiment.applybounds(v, self.space.low, self.space.high) # type: ignore
self._action = v
class PriceAction(Action):
"""The price of each query type.
Use "price" as the "kind" of action in the config.
"""
def __init__(
self, *, low: float, high: float, shape: tuple[int, ...], seed: int
) -> None:
super().__init__(low=low, high=high, shape=shape, seed=seed)
class PriceMultiplierAction(Action):
"""The price multiplier and base price of each query type.
Use "pricemultiplier" as the "kind" of action in the config.
Attributes:
baseprice (np.ndarray): The base price for each product.
"""
def __init__(
self,
*,
low: float,
high: float,
shape: tuple[int, ...],
seed: int,
baseprice: np.ndarray
) -> None:
super().__init__(low=low, high=high, shape=shape, seed=seed)
self.baseprice = baseprice
@property
def value(self) -> np.ndarray:
return self._action
@value.setter
def value(self, v: np.ndarray) -> None:
v = experiment.applybounds(v, self.space.low, self.space.high) # type: ignore
self._action = v
class BudgetAction(Action):
"""The budget across all queries.
Use "budget" as the "kind" of action in the config.
"""
def __init__(
self, *, low: float, high: float, shape: tuple[int, ...], seed: int
) -> None:
super().__init__(low=low, high=high, shape=shape, seed=seed)
def actionfactory(
*, kind: str, low: float, high: float, shape: tuple[int, ...], seed: int, **kwargs
) -> Action:
"""Instantiate a new action.
Keyword Arguments:
kind (str): The type of action to instantiate.
"price" -> PriceAction
"pricemultiplier" -> PriceMultiplierAction
"budget" -> BudgetAction
low (float): The lower bound of the action space
high (float): The upper bound of the action space
shape (tuple[int, ...]): The shape of the action space
seed (int): The seed of the random action generator.
Returns:
Action: An instantiated action.
"""
states = {
"price": PriceAction,
"budget": BudgetAction,
"pricemultiplier": PriceMultiplierAction,
}
return experiment.factory(
kind, states, low=low, high=high, shape=shape, seed=seed, **kwargs
)
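# Illustrative sketch (not part of the original module): the price-multiplier action is
# the only kind that needs the extra baseprice keyword. The numbers below are made up.
if __name__ == "__main__":
    act = actionfactory(
        kind="pricemultiplier",
        low=0.0,
        high=2.0,
        shape=(1,),
        seed=0,
        baseprice=np.array([5.0]),
    )
    assert act.space.contains(act.value)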
| 3,469 | 26.539683 | 86 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/entity/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from simulation.entity.entity import Agent, Entity, entitygroupfactory
| 147 | 28.6 | 70 |
py
|
autoagora-agents
|
autoagora-agents-master/simulation/entity/entity.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import random
import experiment
from simulation.entity.action import actionfactory
from simulation.entity.state import statefactory
class Entity:
"""An entity is an object with a state space, but without an action space.
Attributes:
group (str): To which group this entity belongs. E.g., "consumer" or "indexer".
i (int): The index of the entity.
        name (str): group_i
state (State): The state of the entity
state_space (spaces.Space): The state space of the entity.
"""
def __init__(self, *, group: str, i: int, state: dict, seed: int, **kwargs) -> None:
self.group = group
self.i = i
self.name = f"{group}_{i}"
self.state = statefactory(**state)
def reset(self) -> None:
"""Reset the entity."""
self.state.reset()
class Agent(Entity):
"""An entity is an object with a state space and an action space.
Keyword Arguments:
seed (int): The random seed of the entity.
Attributes:
action (Action): The action taken by the agent
"""
def __init__(
self, *, group: str, i: int, state: dict, action: dict, seed: int, **kwargs
) -> None:
super().__init__(group=group, i=i, state=state, seed=seed)
self.action = actionfactory(seed=seed, **action)
def reset(self) -> None:
"""Reset the agent."""
self.state.reset()
def entitygroupfactory(*, kind: str, count: int, **kwargs) -> list[Entity]:
"""Instantiate new entities of a particular group.
Keyword Arguments:
kind (str): "entity" or "agent".
count (int): The number of entities in this group.
Returns:
list[Entity]: A list of instantiated entities.
"""
edict = {"entity": Entity, "agent": Agent}
group = [
experiment.factory(kind, edict, i=i, seed=random.randint(0, 10000), **kwargs)
for i in range(count)
]
return group
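# Illustrative sketch (not part of the original module): instantiating a group of agents.
# The nested "state"/"action" dicts mirror the statefactory/actionfactory keyword
# arguments; the "indexer" group name and all numeric values are made up.
if __name__ == "__main__":
    import numpy as np
    agents = entitygroupfactory(
        kind="agent",
        count=2,
        group="indexer",
        state={"kind": "price", "low": 0.0, "high": 2.0, "initial": np.array([1.0])},
        action={"kind": "price", "low": 0.0, "high": 2.0, "shape": (1,)},
    )
    assert agents[0].name == "indexer_0" and len(agents) == 2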
| 2,015 | 27.8 | 88 |
py
|
autoagora-agents
|
autoagora-agents-master/autoagora_agents/algorithm.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from abc import ABC, abstractmethod
import numpy as np
import torch
from torch import optim
import experiment
from autoagora_agents import buffer
from autoagora_agents.distribution import distributionfactory
class Algorithm(ABC):
"""Base class for algorithms.
Concretions must implement :meth:`__call__`.
Attributes:
niterations (int): Number of times the algorithm has been called.
nupdates (int): Number of times the algorithm has been updated.
group (str): The group to which the algorithm belongs.
i (int): The index of the algorithm.
name (str): The group and index of the algorithm.
"""
def __init__(self, *, group: str, i: int) -> None:
self.niterations = 0
self.nupdates = 0
self.group = group
self.i = i
self.name = f"{group}_{i}"
def reset(self) -> None:
"""Reset the algorithm's state."""
self.niterations = 0
def update(self) -> None:
"""Update the algorithm's parameters."""
self.nupdates += 1
@abstractmethod
def __call__(
self,
*,
observation: np.ndarray,
action: np.ndarray,
reward: float,
done: bool,
) -> np.ndarray:
"""Run the algorithm forward.
Keyword Arguments:
observation (np.ndarray): The observation seen by the agent.
action (np.ndarray): The previous action taken by the agent.
reward (float): The reward of the agent.
done (bool): If True, the agent is no longer in the game.
Returns:
np.ndarray: The next action taken by the agent.
"""
pass
@staticmethod
def advantage(rewards: torch.Tensor) -> torch.Tensor:
"""Compute a simple advantage estimate.
In effect, this is just standardising the samples to N(0, 1)
Arguments:
rewards (torch.Tensor): The reward-history using which to compute the advantage
Returns:
torch.Tensor: The advantage estimate
"""
std = rewards.std()
if torch.isnan(std) or std == 0:
adv = rewards
else:
adv = (rewards - rewards.mean()) / rewards.std()
return torch.unsqueeze(adv, dim=1)
class PredeterminedAlgorithm(Algorithm):
"""Change to a particular value at a given timestamp.
Attributes:
timestamps (list[int]): The timestamps at which to change the outputted value.
Must start with 0.
vals (list[np.ndarray]): The values outputted.
"""
def __init__(
self, *, group: str, i: int, timestamps: list[int], vals: list[np.ndarray]
) -> None:
super().__init__(group=group, i=i)
if timestamps[0] != 0:
raise ValueError("The first timestamp must be 0.")
if len(timestamps) != len(vals):
raise ValueError("The timestamps and vals lists must have the same length")
self.timestamps = timestamps
self.vals = vals
self.ix = 0
def reset(self) -> None:
super().reset()
self.ix = 0
def __call__(
self,
*,
observation: np.ndarray,
action: np.ndarray,
reward: float,
done: bool,
) -> np.ndarray:
if self.ix != len(self.timestamps) - 1:
if self.niterations >= self.timestamps[self.ix + 1]:
self.ix += 1
self.niterations += 1
return self.vals[self.ix]
class BanditAlgorithm(Algorithm):
"""Algorithms that have no observation other than the reward.
Keyword Arguments:
group (str): The group to which the algorithm belongs.
i (int): The id value of the object within the group.
bufferlength (int): The length of the buffer storing historical samples.
actiondistribution (dict): The config for the distribution representing the action.
optimizer (dict): The config for the optimizer.
Attributes:
actiondist (Distribution): The distribution modelling action-selection.
buffer (deque): The buffer storing historical samples.
optimizer (optim.Optimizer): A torch optimizer.
"""
def __init__(
self,
*,
group: str,
i: int,
bufferlength: int,
actiondistribution: dict,
optimizer: dict,
) -> None:
super().__init__(group=group, i=i)
self.actiondist = distributionfactory(**actiondistribution)
self.buffer = buffer.buffer(maxlength=bufferlength)
optimizer["params"] = self.actiondist.params
self.opt = optimizerfactory(**optimizer)
def reset(self):
super().reset()
self.actiondist.reset()
self.buffer.clear()
def __call__(
self,
*,
observation: np.ndarray,
action: np.ndarray,
reward: float,
done: bool,
) -> np.ndarray:
act = np.array(self.actiondist.sample())
logprob = self.actiondist.logprob(torch.as_tensor(action))
self.buffer.append(
{
"reward": reward,
"action": action,
"logprob": logprob,
}
)
self.niterations += 1
return act
def logprob(self, actions: torch.Tensor) -> torch.Tensor:
"""Compute the log probability of the action given the distribution.
Arguments:
actions (torch.Tensor): The actions for which to compute the log probability
Returns:
torch.Tensor: The log probability of the actions.
"""
return self.actiondist.logprob(actions)
# NOTE: This is experimental! Please do not use!
class VPGBandit(BanditAlgorithm):
"""Bandit using a Vanilla Policy Gradient update.
Keyword Arguments:
group (str): The group to which the algorithm belongs.
i (int): The id value of the object within the group.
bufferlength (int): The length of the buffer storing historical samples.
actiondistribution (dict): The config for the distribution representing the action.
optimizer (dict): The config for the optimizer.
"""
def __init__(
self,
*,
group: str,
i: int,
bufferlength: int,
actiondistribution: dict,
optimizer: dict,
) -> None:
super().__init__(
group=group,
i=i,
bufferlength=bufferlength,
actiondistribution=actiondistribution,
optimizer=optimizer,
)
def _vpgpiloss(self, *, reward: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
"""Compute the VPG policy loss.
Tries to push the policy to maximise the probability of taking actions that
        maximise the return via an advantage function, which is a lower-variance
Q-function.
Keyword Arguments:
reward (torch.Tensor): The rewards associated with taking each action.
action (torch.Tensor): The actions the agent took.
Returns:
torch.Tensor: The policy loss
"""
adv = self.advantage(reward)
logprob = self.logprob(action)
# Treat the different gaussians as independent. Don't mean across them.
loss = -torch.mean(logprob * adv, dim=0)
return loss
def update(self):
if not buffer.isfull(self.buffer):
return
super().update()
rewards = buffer.get("reward", self.buffer)
actions = buffer.get("action", self.buffer)
loss = self._vpgpiloss(reward=rewards, action=actions)
# The fudge factor has been found to empirically be the best balance between the
# standard deviation growing without exploding.
fudgefactor = -5
alexisterm = torch.exp(-self.actiondist.logstddev + fudgefactor) # type: ignore
loss += alexisterm
# Backprop
self.opt.zero_grad()
torch.sum(loss).backward()
self.opt.step()
self.buffer.clear()
# NOTE: This is experimental. Do not use!
class PPOBandit(BanditAlgorithm):
"""Bandit with a PPO update.
Keyword Arguments:
group (str): The group to which the algorithm belongs.
i (int): The id value of the object within the group.
bufferlength (int): The length of the buffer storing historical samples.
actiondistribution (dict): The config for the distribution representing the action.
optimizer (dict): The config for the optimizer.
ppoiterations (int): The number of iterations to update the policy for before
stopping the update step.
epsclip (float): The clip value.
entropycoeff (float): How much to weight the entropy term in the loss.
pullbackstrength (float): How strongly to apply pullback to the initial distribution.
stddevfallback (bool): Whether to do fallback for the standard deviation.
Attributes:
ppoiterations (int): The number of iterations to update the policy for before
stopping the update step.
epsclip (float): The clip value.
entropycoeff (float): How much to weight the entropy term in the loss.
pullbackstrength (float): How strongly to apply pullback to the initial distribution.
stddevfallback (bool): Whether to do fallback for the standard deviation.
"""
def __init__(
self,
*,
group: str,
i: int,
bufferlength: int,
actiondistribution: dict,
optimizer: dict,
ppoiterations: int,
epsclip: float,
entropycoeff: float,
pullbackstrength: float,
stddevfallback: bool,
) -> None:
super().__init__(
group=group,
i=i,
bufferlength=bufferlength,
actiondistribution=actiondistribution,
optimizer=optimizer,
)
self.ppoiterations = ppoiterations
self.epsclip = epsclip
self.entropycoeff = entropycoeff
self.pullbackstrength = pullbackstrength
self.stddevfallback = stddevfallback
def _ppoloss(
self, *, actions: torch.Tensor, logprob: torch.Tensor, adv: torch.Tensor
) -> torch.Tensor:
nlogprob = self.actiondist.logprob(actions)
ratio = torch.exp(nlogprob - logprob)
loss = -torch.min(
ratio * adv,
torch.clip(ratio, min=1 - self.epsclip, max=1 + self.epsclip) * adv,
)
return loss
def _entropyloss(self) -> torch.Tensor:
"""Penalise high entropies."""
return -self.actiondist.entropy() * self.entropycoeff
def _update(self) -> bool:
if not buffer.isfull(self.buffer):
return False
super().update()
rewards = buffer.get("reward", self.buffer)
actions = buffer.get("action", self.buffer)
adv = self.advantage(rewards)
logprob = self.logprob(actions).detach()
for _ in range(self.ppoiterations):
ppoloss = self._ppoloss(actions=actions, logprob=logprob, adv=adv)
entropyloss = self._entropyloss()
loss = torch.mean(ppoloss + entropyloss, dim=0)
# Pullback
loss += (
torch.abs(self.actiondist.unclampedmean - self.actiondist.initial_mean)
* self.pullbackstrength
)
if self.stddevfallback:
diff = self.actiondist.logstddev - torch.log(
self.actiondist.initial_stddev
)
loss += torch.where(
diff > 0.0, diff * self.pullbackstrength, torch.zeros_like(diff)
)
self.opt.zero_grad()
torch.sum(loss).backward()
self.opt.step()
return True
def update(self):
ran = self._update()
if ran:
self.buffer.clear()
# NOTE: This is experimental. Do not use!
class RollingMemoryPPOBandit(PPOBandit):
"""Bandit with a PPO update wherein the buffer is maintained in an off-policy way.
Keyword Arguments:
group (str): The group to which the algorithm belongs.
i (int): The id value of the object within the group.
bufferlength (int): The length of the buffer storing historical samples.
actiondistribution (dict): The config for the distribution representing the action.
optimizer (dict): The config for the optimizer.
ppoiterations (int): The number of iterations to update the policy for before
stopping the update step.
epsclip (float): The clip value.
entropycoeff (float): How much to weight the entropy term in the loss.
pullbackstrength (float): How strongly to apply pullback to the initial distribution.
stddevfallback (bool): Whether to do fallback for the standard deviation.
"""
def __init__(
self,
*,
group: str,
i: int,
bufferlength: int,
actiondistribution: dict,
optimizer: dict,
ppoiterations: int,
epsclip: float,
entropycoeff: float,
pullbackstrength: float,
stddevfallback: bool,
) -> None:
super().__init__(
group=group,
i=i,
bufferlength=bufferlength,
actiondistribution=actiondistribution,
optimizer=optimizer,
ppoiterations=ppoiterations,
epsclip=epsclip,
entropycoeff=entropycoeff,
pullbackstrength=pullbackstrength,
stddevfallback=stddevfallback,
)
def logprob(self, _):
return buffer.get("logprob", self.buffer).unsqueeze(dim=1)
def update(self):
_ = self._update()
def algorithmgroupfactory(*, kind: str, count: int, **kwargs) -> list[Algorithm]:
"""Instantiate new algorithms for a particular group.
Keyword Arguments:
kind (str): The type of algorithm to instantiate.
"vpgbandit" -> VPGBandit
"ppobandit" -> PPOBandit
"rmppobandit" -> RollingMemoryPPOBandit
"predetermined" -> PredeterminedAlgorithm
count (int): The number of entities in this group.
Returns:
list[Algorithm]: A list of instantiated algorithms.
"""
algs = {
"vpgbandit": VPGBandit,
"ppobandit": PPOBandit,
"rmppobandit": RollingMemoryPPOBandit,
"predetermined": PredeterminedAlgorithm,
}
group = [experiment.factory(kind, algs, i=i, **kwargs) for i in range(count)]
return group
def optimizerfactory(*, kind: str, **kwargs) -> optim.Optimizer:
"""Return the requested optimiser.
Keyword Arguments:
kind (str): The type of optimiser to instantiate.
"adam" -> optim.Adam
"sgd" -> optim.SGD
"rmsprop" -> optim.RMSprop
Returns:
optim.Optimizer: The optimiser
"""
opts = {"adam": optim.Adam, "sgd": optim.SGD, "rmsprop": optim.RMSprop}
opt = experiment.factory(kind, opts, **kwargs)
return opt
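# Illustrative sketch (not part of the original module): building a predetermined agent
# via the group factory and checking the advantage helper on a toy reward batch.
# The "indexer" group name, timestamps, and values are made up.
if __name__ == "__main__":
    agent = algorithmgroupfactory(
        kind="predetermined",
        count=1,
        group="indexer",
        timestamps=[0, 5],
        vals=[np.zeros(1), np.ones(1)],
    )[0]
    act = agent(observation=np.zeros(1), action=np.zeros(1), reward=0.0, done=False)
    assert np.array_equiv(act, np.zeros(1))  # still before timestamp 5
    adv = Algorithm.advantage(torch.tensor([1.0, 2.0, 3.0]))  # standardised, shape (3, 1)
    assert torch.allclose(adv.mean(), torch.zeros(1), atol=1e-6)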
| 15,179 | 31.229299 | 93 |
py
|
autoagora-agents
|
autoagora-agents-master/autoagora_agents/distribution.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from abc import ABC, abstractmethod, abstractproperty
from typing import Union
import numpy as np
import torch
from torch import nn
import experiment
ArrayLike = Union[np.ndarray, torch.Tensor]
class Distribution(ABC):
"""The base class for distributions."""
def __init__(self) -> None:
super().__init__()
@abstractmethod
def reset(self) -> None:
"""Reset the distribution to its initial values."""
pass
@abstractproperty
def initial_mean(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: Initial mean of the distribution."""
pass
@abstractproperty
def initial_stddev(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: Initial standard deviation of the distribution."""
pass
@abstractproperty
def logstddev(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: The log standard deviation of the distribution."""
pass
@abstractproperty
def mean(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: Mean of the distribution."""
pass
@abstractproperty
def unclampedmean(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: Unclamped mean of the distribution."""
pass
@abstractproperty
def stddev(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: Standard deviation of the distribution."""
pass
@abstractproperty
def distribution(self) -> torch.distributions.Distribution: # type: ignore
"""torch.distributions.Distribution: The torch distribution."""
pass
@abstractproperty
def params(self) -> list[torch.Tensor]: # type: ignore
"""list[torch.Tensor]: The trainable parameters."""
pass
@abstractmethod
def sample(self) -> torch.Tensor:
"""torch.Tensor: Sample the gaussian distribution."""
pass
@abstractmethod
def logprob(self, x: torch.Tensor) -> torch.Tensor:
"""The log probability of the PDF at x.
Arguments:
x (torch.Tensor): A sample.
Returns:
torch.Tensor: The log probability.
"""
pass
@abstractmethod
def entropy(self) -> torch.Tensor:
"""The entropy of the distribution."""
pass
class GaussianDistribution(Distribution):
"""A Gaussian distribution.
Keyword Arguments:
        initial_mean (ArrayLike): The means of each gaussian distribution. For example,
for multi-product, you would set one initial mean per product.
minmean (ArrayLike): The minimum value the mean can take on.
maxmean (ArrayLike): The maximum value the mean can take on.
        initial_stddev (ArrayLike): The standard deviations of each gaussian
distribution.
minstddev (ArrayLike): The minimum value the standard deviation can take on.
maxstddev (ArrayLike): The maximum value the standard deviation can take on.
Attributes:
mean (torch.Tensor): The clamped mean of the distribution.
minmean (torch.Tensor): The minimum value the mean can take on.
maxmean (torch.Tensor): The maximum value the mean can take on.
stddev (torch.Tensor): The clamped standard deviation of the distribution.
minstddev (torch.Tensor): The minimum value the standard deviation can take on.
maxstddev (torch.Tensor): The maximum value the standard deviation can take on.
"""
def __init__(
self,
*,
initial_mean: ArrayLike,
minmean: ArrayLike,
maxmean: ArrayLike,
initial_stddev: ArrayLike,
minstddev: ArrayLike,
maxstddev: ArrayLike,
) -> None:
super().__init__()
self._initial_mean = torch.as_tensor(initial_mean)
self.maxmean = torch.as_tensor(maxmean)
self.minmean = torch.as_tensor(minmean)
self._mean = nn.parameter.Parameter(self.initial_mean)
self._initial_stddev = torch.as_tensor(initial_stddev)
self.maxstddev = torch.as_tensor(maxstddev)
self.minstddev = torch.as_tensor(minstddev)
self._logstddev = nn.parameter.Parameter(torch.log(self.initial_stddev))
@property
def mean(self) -> torch.Tensor:
return torch.clip(self._mean, min=self.minmean, max=self.maxmean)
@property
def stddev(self) -> torch.Tensor:
return torch.clip(
torch.exp(self.logstddev), min=self.minstddev, max=self.maxstddev
)
@property
def logstddev(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: The log standard deviation of the distribution."""
return self._logstddev
@property
def unclampedmean(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: Unclamped mean of the distribution."""
return self._mean
@property
def initial_mean(self) -> torch.Tensor:
return self._initial_mean
@property
def initial_stddev(self) -> torch.Tensor:
return self._initial_stddev
def reset(self) -> None:
self._mean = nn.parameter.Parameter(self.initial_mean)
self._logstddev = nn.parameter.Parameter(torch.log(self.initial_stddev))
@property
def distribution(self) -> torch.distributions.Distribution:
return torch.distributions.Normal(loc=self.mean, scale=self.stddev)
@property
def params(self) -> list[torch.Tensor]:
return [self._mean, self._logstddev]
def sample(self) -> torch.Tensor:
return self.distribution.rsample().detach()
def logprob(self, x: torch.Tensor) -> torch.Tensor:
return self.distribution.log_prob(x)
def entropy(self) -> torch.Tensor:
return self.distribution.entropy()
class ScaledGaussianDistribution(GaussianDistribution):
"""A Gaussian distribution wherein the gaussian is in a scaled space.
In the scaled space, the mean is multiplied by the inverse scale factor and then put
into log space. This also applies to the bounds on the mean below.
Keyword Arguments:
scalefactor (np.ndarray): The scale factor for each gaussian distribution.
Attributes:
scalefactor (torch.Tensor): The scale factor for each gaussian distribution.
"""
def __init__(
self,
*,
initial_mean: ArrayLike,
minmean: ArrayLike,
maxmean: ArrayLike,
initial_stddev: ArrayLike,
minstddev: ArrayLike,
maxstddev: ArrayLike,
scalefactor: np.ndarray,
) -> None:
self.scalefactor = torch.as_tensor(scalefactor)
super().__init__(
initial_mean=self.inversescale(torch.as_tensor(initial_mean)),
minmean=self.inversescale(torch.as_tensor(minmean)),
maxmean=self.inversescale(torch.as_tensor(maxmean)),
initial_stddev=initial_stddev,
maxstddev=maxstddev,
minstddev=minstddev,
)
@property
def invscalefactor(self) -> torch.Tensor:
"""torch.Tensor: The inverse scale factor for each gaussian distribution."""
return 1 / self.scalefactor
def inversescale(self, x: torch.Tensor) -> torch.Tensor:
"""Apply the inverse scaling operation to x."""
return torch.log(torch.multiply(self.invscalefactor, x))
def scale(self, x: torch.Tensor) -> torch.Tensor:
"""Apply the scaling operation to x."""
return torch.multiply(self.scalefactor, torch.exp(x))
def sample(self) -> torch.Tensor:
"""Sample and return values in the scaled space."""
return self.scale(self.unscaledsample())
def unscaledsample(self) -> torch.Tensor:
"""Sample and return values in the unscaled space."""
return self.distribution.rsample().detach()
def logprob(self, x: torch.Tensor) -> torch.Tensor:
"""The log probability of the PDF at x.
Arguments:
x (torch.Tensor): A sample in the scaled space.
Returns:
torch.Tensor: The log probability.
"""
y = self.inversescale(x)
return self.distribution.log_prob(y)
# We don't make this a subclass of GaussianDistribution with stddev 0
# because torch.distributions.Normal doesn't allow stddev = 0
class DegenerateDistribution(Distribution):
"""A degenerate (deterministic) distribution.
Keyword Arguments:
initial_value (np.ndarray): The initial value of the distribution.
minvalue (np.ndarray): The minimum value of the distribution.
maxvalue (np.ndarray): The maximum value of the distribution.
Attributes:
initial_value (torch.Tensor): The initial value of the distribution.
minvalue (torch.Tensor): The minimum value of the distribution.
maxvalue (torch.Tensor): The maximum value of the distribution.
value (torch.Tensor): The clamped value of the distribution.
"""
def __init__(
self,
*,
initial_value: np.ndarray,
minvalue: np.ndarray,
maxvalue: np.ndarray,
) -> None:
super().__init__()
self.initial_value = torch.as_tensor(initial_value)
self.minvalue = torch.as_tensor(minvalue)
self.maxvalue = torch.as_tensor(maxvalue)
self._value = nn.parameter.Parameter(self.initial_value)
@property
def value(self) -> torch.Tensor:
return torch.clip(self._value, min=self.minvalue, max=self.maxvalue)
@property
def mean(self) -> torch.Tensor:
return self.value
@property
def stddev(self) -> torch.Tensor:
return torch.zeros_like(self.value)
@property
def logstddev(self) -> torch.Tensor:
return torch.log(self.stddev)
@property
def unclampedmean(self) -> torch.Tensor: # type: ignore
"""torch.Tensor: Unclamped mean of the distribution."""
return self._value
@property
def initial_mean(self) -> torch.Tensor:
return self.initial_value
@property
def initial_stddev(self) -> torch.Tensor:
return self.stddev
@property
def params(self) -> list[torch.Tensor]:
return [self._value]
def reset(self) -> None:
self._value = nn.parameter.Parameter(self.initial_value)
def sample(self) -> torch.Tensor:
return self.value
def logprob(self, _: torch.Tensor) -> torch.Tensor:
return torch.zeros_like(self._value)
def entropy(self) -> torch.Tensor:
return torch.zeros_like(self._value)
@property
def distribution(self) -> torch.distributions.Distribution:
return torch.distributions.Normal(
loc=self.value, scale=torch.zeros_like(self.value)
)
def distributionfactory(*, kind: str, **kwargs) -> Distribution:
"""Instantiate a new distribution.
Keyword Arguments:
kind (str): The type of distribution to instantiate.
"gaussian" -> GaussianDistribution
"scaledgaussian" -> ScaledGaussianDistribution
"degenerate" -> DegenerateDistribution
Returns:
Distribution: An instantiated distribution.
"""
dists = {
"gaussian": GaussianDistribution,
"scaledgaussian": ScaledGaussianDistribution,
"degenerate": DegenerateDistribution,
}
return experiment.factory(kind, dists, **kwargs)
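# Illustrative sketch (not part of the original module): the factory maps "kind" onto a
# distribution class; for "scaledgaussian", scale() and inversescale() are exact inverses.
# All numeric parameters below are made up.
if __name__ == "__main__":
    dist = distributionfactory(
        kind="scaledgaussian",
        initial_mean=np.array([1.0]),
        minmean=np.array([0.1]),
        maxmean=np.array([10.0]),
        initial_stddev=np.array([0.5]),
        minstddev=np.array([0.1]),
        maxstddev=np.array([1.0]),
        scalefactor=np.array([2.0]),
    )
    x = torch.as_tensor(np.array([4.0]))
    assert torch.allclose(dist.scale(dist.inversescale(x)), x)  # type: ignore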
| 11,376 | 31.229462 | 88 |
py
|
autoagora-agents
|
autoagora-agents-master/autoagora_agents/buffer.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from collections import deque
from typing import Any
import torch
def buffer(*, maxlength: int) -> deque[dict[str, Any]]:
"""Create a buffer.
Keyword Arguments:
maxlength (int): The maximum length of the buffer.
Returns:
deque[dict[str, Any]]: The empty buffer.
"""
b: deque[dict[str, Any]] = deque(maxlen=maxlength)
return b
def isfull(b: deque[dict[str, torch.Tensor]]) -> bool:
"""Return true if the buffer is full. Else false."""
return len(b) == b.maxlen
def get(k: str, b: deque[dict[str, Any]]) -> torch.Tensor:
"""Get key from elements of the buffer.
Arguments:
k (str): The key.
        b (deque[dict[str, Any]]): The buffer.
    Returns:
        torch.Tensor: The matching elements stacked into a tensor.
"""
return torch.as_tensor([_b[k] for _b in b])
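# Illustrative sketch (not part of the original module): the buffer is a bounded deque of
# sample dicts; get() stacks one key across all stored samples. The values are made up.
if __name__ == "__main__":
    b = buffer(maxlength=2)
    b.append({"reward": 1.0, "action": 0.5})
    b.append({"reward": 2.0, "action": 0.7})
    assert isfull(b)
    assert torch.allclose(get("reward", b), torch.tensor([1.0, 2.0]))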
| 909 | 22.333333 | 58 |
py
|
autoagora-agents
|
autoagora-agents-master/autoagora_agents/controller.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
import random
from typing import Any
import numpy as np
import torch
from autoagora_agents.algorithm import Algorithm, algorithmgroupfactory
class Controller:
"""Holds all algorithms and routes information to each.
Keyword Arguments:
agents (list[dict[str, Any]]): A list of the configs for each agent
group.
seed (int): The seed for torch.
Attributes:
groups (dict[str, Algorithm]): A dictionary mapping agent groups to algorithms.
"""
def __init__(self, *, agents: list[dict[str, Any]], seed: int) -> None:
self.groups = {a["group"]: algorithmgroupfactory(**a) for a in agents}
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
def __call__(
self,
*,
observations: dict[str, np.ndarray],
actions: dict[str, np.ndarray],
rewards: dict[str, float],
dones: dict[str, bool]
) -> dict[str, np.ndarray]:
"""Call each algorithm.
Keyword Arguments:
observations (dict[str, np.ndarray]): The observations of each agent.
actions (dict[str, np.ndarray]): The action of each agent.
rewards (dict[str, float]): The reward received by each agent.
dones (dict[str, bool]): Whether each agent is done.
Returns:
dict[str, np.ndarray]: The next actions of each agent.
"""
acts = {}
for alg in self.algorithmslist:
acts[alg.name] = alg(
observation=observations[alg.name],
action=actions[alg.name],
reward=rewards[alg.name],
done=dones[alg.name],
)
return acts
def update(self) -> None:
"""Update each algorithm."""
for alg in self.algorithmslist:
alg.update()
@property
def algorithmslist(self) -> list[Algorithm]:
"""The algorithms for agents in each group."""
algs = []
for a in self.groups.values():
algs.extend(a)
return algs
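# Illustrative sketch (not part of the original module): one predetermined agent group
# stepped once through the controller. The config mirrors algorithmgroupfactory kwargs;
# the "indexer" group name and values are made up.
if __name__ == "__main__":
    ctrl = Controller(
        agents=[
            {
                "kind": "predetermined",
                "count": 1,
                "group": "indexer",
                "timestamps": [0],
                "vals": [np.zeros(1)],
            }
        ],
        seed=0,
    )
    acts = ctrl(
        observations={"indexer_0": np.zeros(1)},
        actions={"indexer_0": np.zeros(1)},
        rewards={"indexer_0": 0.0},
        dones={"indexer_0": False},
    )
    assert np.array_equiv(acts["indexer_0"], np.zeros(1))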
| 2,147 | 28.833333 | 87 |
py
|
autoagora-agents
|
autoagora-agents-master/autoagora_agents/__init__.py
|
# Copyright 2022-, Semiotic AI, Inc.
# SPDX-License-Identifier: Apache-2.0
from typing import Any
import sacred
from autoagora_agents.controller import Controller
algorithm_ingredient = sacred.Ingredient("algorithm")
@algorithm_ingredient.capture
def controller(*, agents: list[dict[str, Any]], seed: int) -> Controller:
"""Construct a controller from the algorithm config.
Keyword Arguments:
agents (list[dict[str, Any]]): The configs for the agents in each group.
seed (int): The random seed.
Returns:
Controller: An instantiated controller for stepping and updating agents together.
"""
return Controller(agents=agents, seed=seed)
| 686 | 26.48 | 89 |
py
|
flink
|
flink-master/tools/list_deps.py
|
#!/usr/bin/env python
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import xml.etree.ElementTree as ET
import os
import fnmatch
import sys
# This lists all dependencies in the Maven Project root given as first
# argument. If a dependency is included in several versions it is listed once
# for every version. The result output is sorted. So this can be used
# to get a diff between the Maven dependencies of two versions of a project.
path = sys.argv[1]
pomfiles = [os.path.join(dirpath, f)
for dirpath, dirnames, files in os.walk(path)
for f in fnmatch.filter(files, 'pom.xml')]
deps = set()
for pom in pomfiles:
root = ET.parse(pom).getroot()
for dep in root.iter('{http://maven.apache.org/POM/4.0.0}dependency'):
groupId = dep.find("{http://maven.apache.org/POM/4.0.0}groupId").text
artifactId = dep.find("{http://maven.apache.org/POM/4.0.0}artifactId").text
version = dep.find("{http://maven.apache.org/POM/4.0.0}version")
# if it has no version tag it must be in dependencyManagement somewhere, with
# a version tag, so we already have it in our list
        if version is not None:
if "flink" in groupId: continue
depstring = groupId + " " + artifactId + " " + version.text
deps.add(depstring)
deplist = list(deps)
deplist.sort()
for dep in deplist:
    print(dep)
#for atype in e.findall('dependency'):
# print(atype.get('foobar'))
| 2,350 | 36.31746 | 85 |
py
|
flink
|
flink-master/flink-end-to-end-tests/flink-python-test/python/python_job.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import logging
import os
import shutil
import sys
import tempfile
from pyflink.table import EnvironmentSettings, TableEnvironment
from pyflink.table.expressions import col, call, lit
def word_count():
content = "line Licensed to the Apache Software Foundation ASF under one " \
"line or more contributor license agreements See the NOTICE file " \
"line distributed with this work for additional information " \
"line regarding copyright ownership The ASF licenses this file " \
"to you under the Apache License Version the " \
"License you may not use this file except in compliance " \
"with the License"
t_env = TableEnvironment.create(EnvironmentSettings.in_batch_mode())
# used to test pipeline.jars and pipeline.classpaths
config_key = sys.argv[1]
config_value = sys.argv[2]
t_env.get_config().set(config_key, config_value)
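    # For example, the surrounding test harness might invoke this script as
    # (hypothetical values): python python_job.py pipeline.classpaths file:///tmp/udf.jar
    # so that the extra classpath entry is picked up by the job at runtime.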
# register Results table in table environment
tmp_dir = tempfile.gettempdir()
result_path = tmp_dir + '/result'
if os.path.exists(result_path):
try:
if os.path.isfile(result_path):
os.remove(result_path)
else:
shutil.rmtree(result_path)
except OSError as e:
logging.error("Error removing directory: %s - %s.", e.filename, e.strerror)
logging.info("Results directory: %s", result_path)
sink_ddl = """
create table Results(
word VARCHAR,
`count` BIGINT,
`count_java` BIGINT
) with (
'connector.type' = 'filesystem',
'format.type' = 'csv',
'connector.path' = '{}'
)
""".format(result_path)
t_env.execute_sql(sink_ddl)
t_env.execute_sql("create temporary system function add_one as 'add_one.add_one' language python")
t_env.register_java_function("add_one_java", "org.apache.flink.python.tests.util.AddOne")
elements = [(word, 0) for word in content.split(" ")]
t = t_env.from_elements(elements, ["word", "count"])
t.select(t.word,
call("add_one", t.count).alias("count"),
call("add_one_java", t.count).alias("count_java")) \
.group_by(t.word) \
.select(t.word,
col("count").count.alias("count"),
col("count_java").count.alias("count_java")) \
.execute_insert("Results")
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
word_count()
| 3,521 | 38.573034 | 102 |
py
|
flink
|
flink-master/flink-end-to-end-tests/flink-python-test/python/add_one.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.table import DataTypes
from pyflink.table.udf import udf
@udf(input_types=[DataTypes.BIGINT()], result_type=DataTypes.BIGINT())
def add_one(i):
import pytest
return i + 1
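# Minimal usage sketch (illustration only; assumes a TableEnvironment named t_env and the
# expression helpers col/call from pyflink.table.expressions, as used in python_job.py above):
#   t_env.execute_sql(
#       "create temporary system function add_one as 'add_one.add_one' language python")
#   t_env.from_elements([(1,), (2,)], ["i"]).select(call("add_one", col("i"))).execute().print()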
| 1,152 | 43.346154 | 80 |
py
|
flink
|
flink-master/flink-end-to-end-tests/flink-python-test/python/datastream/functions.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.datastream.functions import KeySelector
class MyKeySelector(KeySelector):
def get_key(self, value):
return value[1]
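# Minimal usage sketch (illustration only; mirrors how data_stream_job.py below uses it):
#   ds.key_by(MyKeySelector(), key_type=Types.LONG())
# i.e. the stream is keyed by the second field (value[1]) of each record.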
| 1,103 | 41.461538 | 80 |
py
|
flink
|
flink-master/flink-end-to-end-tests/flink-python-test/python/datastream/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-end-to-end-tests/flink-python-test/python/datastream/data_stream_job.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Any
from pyflink.common import Duration
from pyflink.common.serialization import SimpleStringSchema
from pyflink.common.typeinfo import Types
from pyflink.common.watermark_strategy import TimestampAssigner, WatermarkStrategy
from pyflink.datastream import StreamExecutionEnvironment, TimeCharacteristic
from pyflink.datastream.connectors import FlinkKafkaProducer, FlinkKafkaConsumer
from pyflink.datastream.formats.json import JsonRowDeserializationSchema
from pyflink.datastream.functions import KeyedProcessFunction
from functions import MyKeySelector
def python_data_stream_example():
env = StreamExecutionEnvironment.get_execution_environment()
    # Set the parallelism to one so that all data, including fired timers and normal records,
    # is processed by the same worker and the collected results stay in order, which makes
    # the assertions deterministic.
env.set_parallelism(1)
env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
type_info = Types.ROW_NAMED(['createTime', 'orderId', 'payAmount', 'payPlatform', 'provinceId'],
[Types.LONG(), Types.LONG(), Types.DOUBLE(), Types.INT(),
Types.INT()])
json_row_schema = JsonRowDeserializationSchema.builder().type_info(type_info).build()
kafka_props = {'bootstrap.servers': 'localhost:9092', 'group.id': 'pyflink-e2e-source'}
kafka_consumer = FlinkKafkaConsumer("timer-stream-source", json_row_schema, kafka_props)
kafka_producer = FlinkKafkaProducer("timer-stream-sink", SimpleStringSchema(), kafka_props)
watermark_strategy = WatermarkStrategy.for_bounded_out_of_orderness(Duration.of_seconds(5))\
.with_timestamp_assigner(KafkaRowTimestampAssigner())
kafka_consumer.set_start_from_earliest()
ds = env.add_source(kafka_consumer).assign_timestamps_and_watermarks(watermark_strategy)
ds.key_by(MyKeySelector(), key_type=Types.LONG()) \
.process(MyProcessFunction(), output_type=Types.STRING()) \
.add_sink(kafka_producer)
env.execute_async("test data stream timer")
class MyProcessFunction(KeyedProcessFunction):
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
result = "Current key: {}, orderId: {}, payAmount: {}, timestamp: {}".format(
str(ctx.get_current_key()), str(value[1]), str(value[2]), str(ctx.timestamp()))
yield result
current_watermark = ctx.timer_service().current_watermark()
ctx.timer_service().register_event_time_timer(current_watermark + 1500)
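        # The timer registered above fires, in event time, 1.5 seconds past the current
        # watermark; on_timer below then emits a marker string when it fires.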
def on_timer(self, timestamp, ctx: 'KeyedProcessFunction.OnTimerContext'):
yield "On timer timestamp: " + str(timestamp)
class KafkaRowTimestampAssigner(TimestampAssigner):
def extract_timestamp(self, value: Any, record_timestamp: int) -> int:
return int(value[0])
if __name__ == '__main__':
python_data_stream_example()
| 3,893 | 46.487805 | 100 |
py
|
flink
|
flink-master/flink-end-to-end-tests/test-scripts/python3_fileserver.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import http.server
import socketserver
handler = http.server.SimpleHTTPRequestHandler
# On Azure, ports are reported as still in use if this is not set
socketserver.TCPServer.allow_reuse_address = True
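# (allow_reuse_address sets SO_REUSEADDR on the listening socket, so the port can be
# re-bound immediately after a previous run of this helper exits.)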
httpd = socketserver.TCPServer(("", 9999), handler)
try:
httpd.handle_request()
except:
httpd.shutdown()
| 1,271 | 38.75 | 80 |
py
|
flink
|
flink-master/flink-end-to-end-tests/test-scripts/python2_fileserver.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import SimpleHTTPServer
import SocketServer
handler = SimpleHTTPServer.SimpleHTTPRequestHandler
# On Azure, ports are reported as still in use if this is not set
SocketServer.TCPServer.allow_reuse_address = True
httpd = SocketServer.TCPServer(("", 9999), handler)
try:
httpd.handle_request()
except:
httpd.shutdown()
| 1,281 | 39.0625 | 80 |
py
|
flink
|
flink-master/flink-python/setup.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import io
import os
import platform
import sys
from distutils.command.build_ext import build_ext
from shutil import copytree, copy, rmtree
from setuptools import setup, Extension
if sys.version_info < (3, 7):
print("Python versions prior to 3.7 are not supported for PyFlink.",
file=sys.stderr)
sys.exit(-1)
def remove_if_exists(file_path):
if os.path.exists(file_path):
if os.path.islink(file_path) or os.path.isfile(file_path):
os.remove(file_path)
else:
assert os.path.isdir(file_path)
rmtree(file_path)
def copy_files(src_paths, output_directory):
for src_path, file_mode in src_paths:
if os.path.isdir(src_path):
child_files = os.listdir(src_path)
for child_file in child_files:
dst_path = copy(os.path.join(src_path, child_file), output_directory)
os.chmod(dst_path, file_mode)
else:
dst_path = copy(src_path, os.path.join(output_directory, os.path.basename(src_path)))
os.chmod(dst_path, file_mode)
def has_unsupported_tag(file_element):
    unsupported_tags = ['includes', 'excludes']
for unsupported_tag in unsupported_tags:
if file_element.getElementsByTagName(unsupported_tag):
print('Unsupported <{0}></{1}> tag'.format(unsupported_tag, unsupported_tag))
return True
return False
def extracted_output_files(base_dir, file_path, output_directory):
extracted_file_paths = []
from xml.dom.minidom import parse
dom = parse(file_path)
root_data = dom.documentElement
file_elements = (root_data.getElementsByTagName("files")[0]).getElementsByTagName("file")
# extracted <files><file></file></files>
for file_element in file_elements:
source = ((file_element.getElementsByTagName('source')[0]).childNodes[0]).data
file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8)
try:
dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data
if dst == output_directory:
if has_unsupported_tag(file_element):
sys.exit(-1)
extracted_file_paths.append((os.path.join(base_dir, source), file_mode))
except IndexError:
pass
# extracted <fileSets><fileSet></fileSet></fileSets>
file_elements = (root_data.getElementsByTagName("fileSets")[0]).getElementsByTagName("fileSet")
for file_element in file_elements:
source = ((file_element.getElementsByTagName('directory')[0]).childNodes[0]).data
file_mode = int(((file_element.getElementsByTagName('fileMode')[0]).childNodes[0]).data, 8)
try:
dst = ((file_element.getElementsByTagName('outputDirectory')[0]).childNodes[0]).data
if dst == output_directory:
if has_unsupported_tag(file_element):
sys.exit(-1)
extracted_file_paths.append((os.path.join(base_dir, source), file_mode))
except IndexError:
pass
return extracted_file_paths
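# The assembly descriptor parsed above contains entries of roughly this shape
# (abridged, hypothetical paths; <fileSets> entries use <directory> instead of <source>):
#   <files>
#     <file>
#       <source>src/main/flink-bin/bin/config.sh</source>
#       <outputDirectory>bin</outputDirectory>
#       <fileMode>0755</fileMode>
#     </file>
#   </files>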
# Currently Cython optimizing doesn't support Windows.
if platform.system() == 'Windows':
extensions = ([])
else:
try:
from Cython.Build import cythonize
extensions = cythonize([
Extension(
name="pyflink.fn_execution.coder_impl_fast",
sources=["pyflink/fn_execution/coder_impl_fast.pyx"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.table.aggregate_fast",
sources=["pyflink/fn_execution/table/aggregate_fast.pyx"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.table.window_aggregate_fast",
sources=["pyflink/fn_execution/table/window_aggregate_fast.pyx"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.stream_fast",
sources=["pyflink/fn_execution/stream_fast.pyx"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.beam.beam_stream_fast",
sources=["pyflink/fn_execution/beam/beam_stream_fast.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_coder_impl_fast",
sources=["pyflink/fn_execution/beam/beam_coder_impl_fast.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_operations_fast",
sources=["pyflink/fn_execution/beam/beam_operations_fast.pyx"],
include_dirs=["pyflink/fn_execution/beam"]),
])
except ImportError:
if os.path.exists("pyflink/fn_execution/coder_impl_fast.c"):
extensions = ([
Extension(
name="pyflink.fn_execution.coder_impl_fast",
sources=["pyflink/fn_execution/coder_impl_fast.c"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.table.aggregate_fast",
sources=["pyflink/fn_execution/table/aggregate_fast.c"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.table.window_aggregate_fast",
sources=["pyflink/fn_execution/table/window_aggregate_fast.c"],
include_dirs=["pyflink/fn_execution/table/"]),
Extension(
name="pyflink.fn_execution.stream_fast",
sources=["pyflink/fn_execution/stream_fast.c"],
include_dirs=["pyflink/fn_execution/"]),
Extension(
name="pyflink.fn_execution.beam.beam_stream_fast",
sources=["pyflink/fn_execution/beam/beam_stream_fast.c"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_coder_impl_fast",
sources=["pyflink/fn_execution/beam/beam_coder_impl_fast.c"],
include_dirs=["pyflink/fn_execution/beam"]),
Extension(
name="pyflink.fn_execution.beam.beam_operations_fast",
sources=["pyflink/fn_execution/beam/beam_operations_fast.c"],
include_dirs=["pyflink/fn_execution/beam"]),
])
else:
extensions = ([])
this_directory = os.path.abspath(os.path.dirname(__file__))
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
TEMP_PATH = "deps"
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LOG_TEMP_PATH = os.path.join(TEMP_PATH, "log")
EXAMPLES_TEMP_PATH = os.path.join(TEMP_PATH, "examples")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")
PYFLINK_UDF_RUNNER_SH = "pyflink-udf-runner.sh"
PYFLINK_UDF_RUNNER_BAT = "pyflink-udf-runner.bat"
in_flink_source = os.path.isfile("../flink-java/src/main/java/org/apache/flink/api/java/"
"ExecutionEnvironment.java")
try:
if in_flink_source:
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
flink_version = VERSION.replace(".dev0", "-SNAPSHOT")
FLINK_HOME = os.path.abspath(
"../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version))
FLINK_ROOT = os.path.abspath("..")
FLINK_DIST = os.path.join(FLINK_ROOT, "flink-dist")
FLINK_BIN = os.path.join(FLINK_DIST, "src/main/flink-bin")
EXAMPLES_PATH = os.path.join(this_directory, "pyflink/examples")
LICENSE_FILE_PATH = os.path.join(FLINK_ROOT, "LICENSE")
README_FILE_PATH = os.path.join(FLINK_BIN, "README.txt")
FLINK_BIN_XML_FILE = os.path.join(FLINK_BIN, '../assemblies/bin.xml')
# copy conf files
os.mkdir(CONF_TEMP_PATH)
conf_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'conf')
copy_files(conf_paths, CONF_TEMP_PATH)
# copy bin files
os.mkdir(SCRIPTS_TEMP_PATH)
script_paths = extracted_output_files(FLINK_DIST, FLINK_BIN_XML_FILE, 'bin')
copy_files(script_paths, SCRIPTS_TEMP_PATH)
copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_SH),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_SH))
copy(os.path.join(this_directory, "pyflink", "bin", PYFLINK_UDF_RUNNER_BAT),
os.path.join(SCRIPTS_TEMP_PATH, PYFLINK_UDF_RUNNER_BAT))
try:
os.symlink(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
except BaseException: # pylint: disable=broad-except
copytree(EXAMPLES_PATH, EXAMPLES_TEMP_PATH)
copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
copy(README_FILE_PATH, README_FILE_TEMP_PATH)
os.mkdir(LOG_TEMP_PATH)
with open(os.path.join(LOG_TEMP_PATH, "empty.txt"), 'w') as f:
f.write("This file is used to force setuptools to include the log directory. "
"You can delete it at any time after installation.")
else:
if not os.path.isdir(SCRIPTS_TEMP_PATH):
print("The flink core files are not found. Please make sure your installation package "
"is complete, or do this in the flink-python directory of the flink source "
"directory.")
sys.exit(-1)
if VERSION.find('dev0') != -1:
apache_flink_libraries_dependency = 'apache-flink-libraries==%s' % VERSION
else:
split_versions = VERSION.split('.')
split_versions[-1] = str(int(split_versions[-1]) + 1)
NEXT_VERSION = '.'.join(split_versions)
apache_flink_libraries_dependency = 'apache-flink-libraries>=%s,<%s' % \
(VERSION, NEXT_VERSION)
script_names = ["pyflink-shell.sh", "find-flink-home.sh"]
scripts = [os.path.join(SCRIPTS_TEMP_PATH, script) for script in script_names]
scripts.append("pyflink/find_flink_home.py")
PACKAGES = ['pyflink',
'pyflink.table',
'pyflink.util',
'pyflink.datastream',
'pyflink.datastream.connectors',
'pyflink.datastream.formats',
'pyflink.common',
'pyflink.fn_execution',
'pyflink.fn_execution.beam',
'pyflink.fn_execution.datastream',
'pyflink.fn_execution.datastream.embedded',
'pyflink.fn_execution.datastream.process',
'pyflink.fn_execution.datastream.window',
'pyflink.fn_execution.embedded',
'pyflink.fn_execution.formats',
'pyflink.fn_execution.metrics',
'pyflink.fn_execution.metrics.embedded',
'pyflink.fn_execution.metrics.process',
'pyflink.fn_execution.table',
'pyflink.fn_execution.utils',
'pyflink.metrics',
'pyflink.conf',
'pyflink.log',
'pyflink.examples',
'pyflink.bin',
'pyflink.testing']
PACKAGE_DIR = {
'pyflink.conf': TEMP_PATH + '/conf',
'pyflink.log': TEMP_PATH + '/log',
'pyflink.examples': TEMP_PATH + '/examples',
'pyflink.bin': TEMP_PATH + '/bin'}
PACKAGE_DATA = {
'pyflink': ['README.txt'],
'pyflink.conf': ['*'],
'pyflink.log': ['*'],
'pyflink.examples': ['*.py', '*/*.py'],
'pyflink.bin': ['*']}
install_requires = ['py4j==0.10.9.7', 'python-dateutil>=2.8.0,<3', 'apache-beam==2.43.0',
'cloudpickle==2.2.0', 'avro-python3>=1.8.1,!=1.9.2,<1.10.0',
'pytz>=2018.3', 'fastavro>=1.1.0,<1.4.8', 'requests>=2.26.0',
'protobuf>=3.19.0,<=3.21',
'numpy>=1.21.4,<1.22.0',
'pandas>=1.3.0,<1.4.0',
'pyarrow>=5.0.0,<9.0.0',
'pemja==0.3.0;platform_system != "Windows"',
'httplib2>=0.19.0,<=0.20.4', apache_flink_libraries_dependency]
setup(
name='apache-flink',
version=VERSION,
packages=PACKAGES,
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
scripts=scripts,
url='https://flink.apache.org',
license='https://www.apache.org/licenses/LICENSE-2.0',
author='Apache Software Foundation',
author_email='[email protected]',
python_requires='>=3.7',
install_requires=install_requires,
cmdclass={'build_ext': build_ext},
description='Apache Flink Python API',
long_description=long_description,
long_description_content_type='text/markdown',
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10'],
ext_modules=extensions
)
finally:
if in_flink_source:
remove_if_exists(TEMP_PATH)
remove_if_exists(LICENSE_FILE_TEMP_PATH)
remove_if_exists(README_FILE_TEMP_PATH)
| 15,513 | 43.19943 | 99 |
py
|
flink
|
flink-master/flink-python/apache-flink-libraries/setup.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import glob
import io
import os
import platform
import subprocess
import sys
from shutil import copytree, copy, rmtree
from setuptools import setup
def remove_if_exists(file_path):
if os.path.exists(file_path):
if os.path.islink(file_path) or os.path.isfile(file_path):
os.remove(file_path)
else:
assert os.path.isdir(file_path)
rmtree(file_path)
def find_file_path(pattern):
files = glob.glob(pattern)
if len(files) < 1:
print("Failed to find the file %s." % pattern)
exit(-1)
if len(files) > 1:
print("The file pattern %s is ambiguous: %s" % (pattern, files))
exit(-1)
return files[0]
in_flink_source = os.path.isfile("../../flink-java/src/main/java/org/apache/flink/api/java/"
"ExecutionEnvironment.java")
this_directory = os.path.abspath(os.path.dirname(__file__))
pyflink_directory = os.path.join(this_directory, "pyflink")
if in_flink_source:
remove_if_exists(pyflink_directory)
os.mkdir(pyflink_directory)
version_file = os.path.join(this_directory, '../pyflink/version.py')
else:
version_file = os.path.join(this_directory, 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
VERSION = __version__ # noqa
with io.open(os.path.join(this_directory, 'README.md'), 'r', encoding='utf-8') as f:
long_description = f.read()
TEMP_PATH = "deps"
LIB_TEMP_PATH = os.path.join(TEMP_PATH, "lib")
OPT_TEMP_PATH = os.path.join(TEMP_PATH, "opt")
CONF_TEMP_PATH = os.path.join(TEMP_PATH, "conf")
LICENSES_TEMP_PATH = os.path.join(TEMP_PATH, "licenses")
PLUGINS_TEMP_PATH = os.path.join(TEMP_PATH, "plugins")
SCRIPTS_TEMP_PATH = os.path.join(TEMP_PATH, "bin")
LICENSE_FILE_TEMP_PATH = os.path.join(this_directory, "LICENSE")
NOTICE_FILE_TEMP_PATH = os.path.join(this_directory, "NOTICE")
README_FILE_TEMP_PATH = os.path.join("pyflink", "README.txt")
VERSION_FILE_TEMP_PATH = os.path.join("pyflink", "version.py")
# Due to changes in FLINK-14008, the licenses directory and NOTICE file may not exist in
# the build-target folder. Just ignore them in this case.
exist_licenses = None
try:
if in_flink_source:
try:
os.mkdir(TEMP_PATH)
except:
print("Temp path for symlink to parent already exists {0}".format(TEMP_PATH),
file=sys.stderr)
sys.exit(-1)
flink_version = VERSION.replace(".dev0", "-SNAPSHOT")
FLINK_HOME = os.path.abspath(
"../../flink-dist/target/flink-%s-bin/flink-%s" % (flink_version, flink_version))
incorrect_invocation_message = """
If you are installing pyflink from flink source, you must first build Flink and
run sdist.
To build Flink with maven you can run:
mvn -DskipTests clean package
Building the source dist is done in the flink-python directory:
cd flink-python
cd apache-flink-libraries
python setup.py sdist
pip install dist/*.tar.gz"""
LIB_PATH = os.path.join(FLINK_HOME, "lib")
OPT_PATH = os.path.join(FLINK_HOME, "opt")
OPT_PYTHON_JAR_NAME = os.path.basename(
find_file_path(os.path.join(OPT_PATH, "flink-python*.jar")))
OPT_SQL_CLIENT_JAR_NAME = os.path.basename(
find_file_path(os.path.join(OPT_PATH, "flink-sql-client*.jar")))
LICENSES_PATH = os.path.join(FLINK_HOME, "licenses")
PLUGINS_PATH = os.path.join(FLINK_HOME, "plugins")
SCRIPTS_PATH = os.path.join(FLINK_HOME, "bin")
LICENSE_FILE_PATH = os.path.join(FLINK_HOME, "LICENSE")
README_FILE_PATH = os.path.join(FLINK_HOME, "README.txt")
VERSION_FILE_PATH = os.path.join(this_directory, "../pyflink/version.py")
exist_licenses = os.path.exists(LICENSES_PATH)
if not os.path.isdir(LIB_PATH):
print(incorrect_invocation_message, file=sys.stderr)
sys.exit(-1)
try:
os.symlink(LIB_PATH, LIB_TEMP_PATH)
support_symlinks = True
except BaseException: # pylint: disable=broad-except
support_symlinks = False
os.mkdir(OPT_TEMP_PATH)
if support_symlinks:
os.symlink(os.path.join(OPT_PATH, OPT_PYTHON_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_PYTHON_JAR_NAME))
os.symlink(os.path.join(OPT_PATH, OPT_SQL_CLIENT_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_SQL_CLIENT_JAR_NAME))
os.symlink(PLUGINS_PATH, PLUGINS_TEMP_PATH)
os.symlink(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
os.symlink(README_FILE_PATH, README_FILE_TEMP_PATH)
os.symlink(VERSION_FILE_PATH, VERSION_FILE_TEMP_PATH)
else:
copytree(LIB_PATH, LIB_TEMP_PATH)
copy(os.path.join(OPT_PATH, OPT_PYTHON_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_PYTHON_JAR_NAME))
copy(os.path.join(OPT_PATH, OPT_SQL_CLIENT_JAR_NAME),
os.path.join(OPT_TEMP_PATH, OPT_SQL_CLIENT_JAR_NAME))
copytree(PLUGINS_PATH, PLUGINS_TEMP_PATH)
copy(LICENSE_FILE_PATH, LICENSE_FILE_TEMP_PATH)
copy(README_FILE_PATH, README_FILE_TEMP_PATH)
copy(VERSION_FILE_PATH, VERSION_FILE_TEMP_PATH)
os.mkdir(SCRIPTS_TEMP_PATH)
bin_jars = glob.glob(os.path.join(SCRIPTS_PATH, "*.jar"))
for bin_jar in bin_jars:
copy(bin_jar, os.path.join(SCRIPTS_TEMP_PATH, os.path.basename(bin_jar)))
if exist_licenses and platform.system() != "Windows":
# regenerate the licenses directory and NOTICE file as we only copy part of the
# flink binary distribution.
collect_licenses_file_sh = os.path.abspath(os.path.join(
this_directory, "..", "..", "tools", "releasing", "collect_license_files.sh"))
subprocess.check_output([collect_licenses_file_sh, TEMP_PATH, TEMP_PATH])
# move the NOTICE file to the root of the package
GENERATED_NOTICE_FILE_PATH = os.path.join(TEMP_PATH, "NOTICE")
os.rename(GENERATED_NOTICE_FILE_PATH, NOTICE_FILE_TEMP_PATH)
else:
if not os.path.isdir(LIB_TEMP_PATH) or not os.path.isdir(OPT_TEMP_PATH):
print("The flink core files are not found. Please make sure your installation package "
"is complete, or do this in the flink-python directory of the flink source "
"directory.")
sys.exit(-1)
exist_licenses = os.path.exists(LICENSES_TEMP_PATH)
PACKAGES = ['pyflink',
'pyflink.bin',
'pyflink.lib',
'pyflink.opt',
'pyflink.plugins']
PACKAGE_DIR = {
'pyflink.bin': TEMP_PATH + '/bin',
'pyflink.lib': TEMP_PATH + '/lib',
'pyflink.opt': TEMP_PATH + '/opt',
'pyflink.plugins': TEMP_PATH + '/plugins'}
PACKAGE_DATA = {
'pyflink': ['README.txt', 'version.py'],
'pyflink.bin': ['*.jar'],
'pyflink.lib': ['*.jar'],
'pyflink.opt': ['*.*', '*/*'],
'pyflink.plugins': ['*', '*/*']}
if exist_licenses and platform.system() != "Windows":
PACKAGES.append('pyflink.licenses')
PACKAGE_DIR['pyflink.licenses'] = TEMP_PATH + '/licenses'
PACKAGE_DATA['pyflink.licenses'] = ['*']
setup(
name='apache-flink-libraries',
version=VERSION,
packages=PACKAGES,
include_package_data=True,
package_dir=PACKAGE_DIR,
package_data=PACKAGE_DATA,
url='https://flink.apache.org',
license='https://www.apache.org/licenses/LICENSE-2.0',
author='Apache Software Foundation',
author_email='[email protected]',
python_requires='>=3.6',
description='Apache Flink Libraries',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9'],
)
finally:
if in_flink_source:
remove_if_exists(TEMP_PATH)
remove_if_exists(LICENSE_FILE_TEMP_PATH)
remove_if_exists(NOTICE_FILE_TEMP_PATH)
remove_if_exists(README_FILE_TEMP_PATH)
remove_if_exists(VERSION_FILE_TEMP_PATH)
remove_if_exists(pyflink_directory)
| 9,763 | 39.853556 | 99 |
py
|
flink
|
flink-master/flink-python/docs/conf.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import print_function
import os
import sys
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
# project = u'Flink Python Table API'
project = u'PyFlink'
copyright = u''
author = u'Author'
version_file = os.path.join("..", 'pyflink/version.py')
try:
exec(open(version_file).read())
except IOError:
print("Failed to load PyFlink version file for packaging. " +
"'%s' not found!" % version_file,
file=sys.stderr)
sys.exit(-1)
# The short X.Y version
version = __version__ # noqa
# The full version, including alpha/beta/rc tags
release = os.environ.get('RELEASE_VERSION', version)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.doctest',
'sphinx.ext.autosummary',
'sphinx_mdinclude'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Look at the first line of the docstring for function and method signatures.
autosummary_generate = True
autodoc_docstring_signature = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"collapse_navigation": True,
"navigation_depth": 0
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../docs/static/navbar-brand-logo.jpg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = html_logo
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyflinkdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyflink.tex', u'pyflink Documentation',
[author], 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyflink', u'pyflink Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyflink', u'pyflink Documentation',
author, 'pyflink', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'pyflink'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2019, Author'
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
| 7,437 | 32.205357 | 80 |
py
|
flink
|
flink-master/flink-python/dev/pip_test_code.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
# test pyflink shell environment
from pyflink.table.expressions import lit
from pyflink.shell import s_env, st_env, DataTypes
from pyflink.table.schema import Schema
from pyflink.table.table_descriptor import TableDescriptor, FormatDescriptor
import tempfile
import os
import shutil
sink_path = tempfile.gettempdir() + '/batch.csv'
if os.path.exists(sink_path):
if os.path.isfile(sink_path):
os.remove(sink_path)
else:
shutil.rmtree(sink_path)
s_env.set_parallelism(1)
t = st_env.from_elements([(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c'])
st_env.create_temporary_table("csv_sink", TableDescriptor.for_connector("filesystem")
.schema(Schema.new_builder()
.column("a", DataTypes.BIGINT())
.column("b", DataTypes.STRING())
.column("c", DataTypes.STRING())
.build())
.option("path", sink_path)
.format(FormatDescriptor.for_format("csv")
.option("field-delimiter", ",")
.build())
.build())
t.select(t.a + lit(1), t.b, t.c).execute_insert("csv_sink").wait()
with open(os.path.join(sink_path, os.listdir(sink_path)[0]), 'r') as f:
lines = f.read()
assert lines == '2,hi,hello\n' + '3,hi,hello\n'
print('pip_test_code.py success!')
| 2,198 | 38.267857 | 85 |
py
|
flink
|
flink-master/flink-python/pyflink/serializers.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import pickle
import struct
from abc import abstractmethod
from io import BytesIO
from itertools import chain
from typing import TypeVar, Iterable
from pyflink.common.serializer import TypeSerializer
T = TypeVar('T')
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
NULL = -2
class IterableSerializer(TypeSerializer[Iterable[T]]):
def _load_from_stream_without_unbatching(self, stream: BytesIO):
"""
Returns an iterator of deserialized batches (iterable) of objects from the input stream.
If the serializer does not operate on batches the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.deserialize(stream))
class VarLengthDataSerializer(IterableSerializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where length is a 32-bit integer and data is length bytes.
"""
def serialize(self, iterable, stream):
for obj in iterable:
self._write_with_length(obj, stream)
def deserialize(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("Serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("Can not serialize object larger than 2G")
write_int(len(serialized), stream)
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
@abstractmethod
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
pass
@abstractmethod
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
pass
class PickleSerializer(VarLengthDataSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/3/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, 3)
def loads(self, obj):
return pickle.loads(obj, encoding="bytes")
class BatchedSerializer(IterableSerializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batch_size=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batch_size = batch_size
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batch_size)
def _batched(self, iterable):
if self.batch_size == self.UNLIMITED_BATCH_SIZE:
yield list(iterable)
elif hasattr(iterable, "__len__") and hasattr(iterable, "__getslice__"):
n = len(iterable)
for i in range(0, n, self.batch_size):
yield iterable[i: i + self.batch_size]
else:
items = []
count = 0
for item in iterable:
items.append(item)
count += 1
if count == self.batch_size:
yield items
items = []
count = 0
if items:
yield items
def serialize(self, iterator, stream):
self.serializer.serialize(self._batched(iterator), stream)
def deserialize(self, stream):
return chain.from_iterable(self._load_from_stream_without_unbatching(stream))
def _load_from_stream_without_unbatching(self, stream):
return self.serializer.deserialize(stream)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
| 5,422 | 30.52907 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/pyflink_gateway_server.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import argparse
import getpass
import glob
import os
import platform
import signal
import socket
import sys
from collections import namedtuple
from string import Template
from subprocess import Popen, PIPE
from pyflink.find_flink_home import _find_flink_home, _find_flink_source_root
KEY_ENV_LOG_DIR = "env.log.dir"
KEY_ENV_YARN_CONF_DIR = "env.yarn.conf.dir"
KEY_ENV_HADOOP_CONF_DIR = "env.hadoop.conf.dir"
KEY_ENV_HBASE_CONF_DIR = "env.hbase.conf.dir"
KEY_ENV_JAVA_HOME = "env.java.home"
KEY_ENV_JAVA_OPTS = "env.java.opts.all"
KEY_ENV_JAVA_OPTS_DEPRECATED = "env.java.opts"
def on_windows():
return platform.system() == "Windows"
def read_from_config(key, default_value, flink_conf_file):
value = default_value
    # Use the realpath of the (potentially tainted) path value to avoid CWE-22 (path traversal),
    # where a path or URI constructed from a tainted value could allow an attacker to access,
    # modify, or test the existence of critical or sensitive files.
with open(os.path.realpath(flink_conf_file), "r") as f:
while True:
line = f.readline()
if not line:
break
if line.startswith("#") or len(line.strip()) == 0:
continue
k, v = line.split(":", 1)
if k.strip() == key:
value = v.strip()
return value
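# For example, given a flink-conf.yaml containing a line such as (hypothetical value)
#     env.java.home: /usr/lib/jvm/java-11
# read_from_config("env.java.home", None, conf_file) returns "/usr/lib/jvm/java-11".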
def find_java_executable():
java_executable = "java.exe" if on_windows() else "java"
flink_home = _find_flink_home()
flink_conf_file = os.path.join(flink_home, "conf", "flink-conf.yaml")
java_home = read_from_config(KEY_ENV_JAVA_HOME, None, flink_conf_file)
if java_home is None and "JAVA_HOME" in os.environ:
java_home = os.environ["JAVA_HOME"]
if java_home is not None:
java_executable = os.path.join(java_home, "bin", java_executable)
return java_executable
def prepare_environment_variables(env):
flink_home = _find_flink_home()
    # Use the realpath of the (potentially tainted) path value to avoid CWE-22 (path traversal),
    # where a path or URI constructed from a tainted value could allow an attacker to access,
    # modify, or test the existence of critical or sensitive files.
real_flink_home = os.path.realpath(flink_home)
if 'FLINK_CONF_DIR' in env:
flink_conf_directory = os.path.realpath(env['FLINK_CONF_DIR'])
else:
flink_conf_directory = os.path.join(real_flink_home, "conf")
env['FLINK_CONF_DIR'] = flink_conf_directory
if 'FLINK_LIB_DIR' in env:
flink_lib_directory = os.path.realpath(env['FLINK_LIB_DIR'])
else:
flink_lib_directory = os.path.join(real_flink_home, "lib")
env['FLINK_LIB_DIR'] = flink_lib_directory
if 'FLINK_OPT_DIR' in env:
flink_opt_directory = os.path.realpath(env['FLINK_OPT_DIR'])
else:
flink_opt_directory = os.path.join(real_flink_home, "opt")
env['FLINK_OPT_DIR'] = flink_opt_directory
if 'FLINK_PLUGINS_DIR' in env:
flink_plugins_directory = os.path.realpath(env['FLINK_PLUGINS_DIR'])
else:
flink_plugins_directory = os.path.join(real_flink_home, "plugins")
env['FLINK_PLUGINS_DIR'] = flink_plugins_directory
env["FLINK_BIN_DIR"] = os.path.join(real_flink_home, "bin")
def construct_log_settings(env):
templates = [
"-Dlog.file=${flink_log_dir}/flink-${flink_ident_string}-python-${hostname}.log",
"-Dlog4j.configuration=${log4j_properties}",
"-Dlog4j.configurationFile=${log4j_properties}",
"-Dlogback.configurationFile=${logback_xml}"
]
flink_home = os.path.realpath(_find_flink_home())
flink_conf_dir = env['FLINK_CONF_DIR']
flink_conf_file = os.path.join(env['FLINK_CONF_DIR'], "flink-conf.yaml")
if "FLINK_LOG_DIR" in env:
flink_log_dir = env["FLINK_LOG_DIR"]
else:
flink_log_dir = read_from_config(
KEY_ENV_LOG_DIR, os.path.join(flink_home, "log"), flink_conf_file)
if "LOG4J_PROPERTIES" in env:
log4j_properties = env["LOG4J_PROPERTIES"]
else:
log4j_properties = "%s/log4j-cli.properties" % flink_conf_dir
if "LOGBACK_XML" in env:
logback_xml = env["LOGBACK_XML"]
else:
logback_xml = "%s/logback.xml" % flink_conf_dir
if "FLINK_IDENT_STRING" in env:
flink_ident_string = env["FLINK_IDENT_STRING"]
else:
flink_ident_string = getpass.getuser()
hostname = socket.gethostname()
log_settings = []
for template in templates:
log_settings.append(Template(template).substitute(
log4j_properties=log4j_properties,
logback_xml=logback_xml,
flink_log_dir=flink_log_dir,
flink_ident_string=flink_ident_string,
hostname=hostname))
return log_settings
def get_jvm_opts(env):
flink_conf_file = os.path.join(env['FLINK_CONF_DIR'], "flink-conf.yaml")
jvm_opts = env.get(
'FLINK_ENV_JAVA_OPTS',
read_from_config(
KEY_ENV_JAVA_OPTS,
read_from_config(KEY_ENV_JAVA_OPTS_DEPRECATED, "", flink_conf_file),
flink_conf_file))
# Remove leading and ending double quotes (if present) of value
jvm_opts = jvm_opts.strip("\"")
return jvm_opts.split(" ")
def construct_flink_classpath(env):
flink_home = _find_flink_home()
flink_lib_directory = env['FLINK_LIB_DIR']
flink_opt_directory = env['FLINK_OPT_DIR']
if on_windows():
# The command length is limited on Windows. To avoid the problem we should shorten the
# command length as much as possible.
lib_jars = os.path.join(flink_lib_directory, "*")
else:
lib_jars = os.pathsep.join(glob.glob(os.path.join(flink_lib_directory, "*.jar")))
flink_python_jars = glob.glob(os.path.join(flink_opt_directory, "flink-python*.jar"))
if len(flink_python_jars) < 1:
print("The flink-python jar is not found in the opt folder of the FLINK_HOME: %s" %
flink_home)
return lib_jars
flink_python_jar = flink_python_jars[0]
return os.pathsep.join([lib_jars, flink_python_jar])
def construct_hadoop_classpath(env):
flink_conf_file = os.path.join(env['FLINK_CONF_DIR'], "flink-conf.yaml")
hadoop_conf_dir = ""
if 'HADOOP_CONF_DIR' not in env and 'HADOOP_CLASSPATH' not in env:
if os.path.isdir("/etc/hadoop/conf"):
print("Setting HADOOP_CONF_DIR=/etc/hadoop/conf because no HADOOP_CONF_DIR or"
"HADOOP_CLASSPATH was set.")
hadoop_conf_dir = "/etc/hadoop/conf"
hbase_conf_dir = ""
if 'HBASE_CONF_DIR' not in env:
if os.path.isdir("/etc/hbase/conf"):
print("Setting HBASE_CONF_DIR=/etc/hbase/conf because no HBASE_CONF_DIR was set.")
hbase_conf_dir = "/etc/hbase/conf"
return os.pathsep.join(
[env.get("HADOOP_CLASSPATH", ""),
env.get("YARN_CONF_DIR",
read_from_config(KEY_ENV_YARN_CONF_DIR, "", flink_conf_file)),
env.get("HADOOP_CONF_DIR",
read_from_config(KEY_ENV_HADOOP_CONF_DIR, hadoop_conf_dir, flink_conf_file)),
env.get("HBASE_CONF_DIR",
read_from_config(KEY_ENV_HBASE_CONF_DIR, hbase_conf_dir, flink_conf_file))])
def construct_test_classpath():
test_jar_patterns = [
"flink-python/target/test-dependencies/*",
"flink-python/target/artifacts/testDataStream.jar",
"flink-python/target/flink-python*-tests.jar",
]
test_jars = []
flink_source_root = _find_flink_source_root()
for pattern in test_jar_patterns:
pattern = pattern.replace("/", os.path.sep)
test_jars += glob.glob(os.path.join(flink_source_root, pattern))
return os.path.pathsep.join(test_jars)
def construct_program_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("-c", "--class", required=True)
parser.add_argument("cluster_type", choices=["local", "remote", "yarn"])
parse_result, other_args = parser.parse_known_args(args)
main_class = getattr(parse_result, "class")
cluster_type = parse_result.cluster_type
return namedtuple(
"ProgramArgs", ["main_class", "cluster_type", "other_args"])(
main_class, cluster_type, other_args)
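# For example (hypothetical main class):
#   construct_program_args(["-c", "org.apache.flink.client.python.PythonGatewayServer", "local"])
# yields main_class="org.apache.flink.client.python.PythonGatewayServer",
# cluster_type="local" and other_args=[].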
def launch_gateway_server_process(env, args):
prepare_environment_variables(env)
program_args = construct_program_args(args)
if program_args.cluster_type == "local":
java_executable = find_java_executable()
log_settings = construct_log_settings(env)
jvm_args = env.get('JVM_ARGS', '')
jvm_opts = get_jvm_opts(env)
classpath = os.pathsep.join(
[construct_flink_classpath(env), construct_hadoop_classpath(env)])
if "FLINK_TESTING" in env:
classpath = os.pathsep.join([classpath, construct_test_classpath()])
command = [java_executable, jvm_args, "-XX:+IgnoreUnrecognizedVMOptions",
"--add-opens=jdk.proxy2/jdk.proxy2=ALL-UNNAMED"] \
+ jvm_opts + log_settings \
+ ["-cp", classpath, program_args.main_class] + program_args.other_args
else:
command = [os.path.join(env["FLINK_BIN_DIR"], "flink"), "run"] + program_args.other_args \
+ ["-c", program_args.main_class]
preexec_fn = None
if not on_windows():
def preexec_func():
# ignore ctrl-c / SIGINT
signal.signal(signal.SIGINT, signal.SIG_IGN)
preexec_fn = preexec_func
return Popen(list(filter(lambda c: len(c) != 0, command)),
stdin=PIPE, stderr=PIPE, preexec_fn=preexec_fn, env=env)
if __name__ == "__main__":
launch_gateway_server_process(os.environ, sys.argv[1:])
| 10,611 | 37.449275 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/gen_protos.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from __future__ import absolute_import
from __future__ import print_function
import glob
import logging
import multiprocessing
import os
import platform
import shutil
import subprocess
import sys
import time
import warnings
import pkg_resources
GRPC_TOOLS = 'grpcio-tools>=1.29.0,<=1.46.3'
PROTO_PATHS = ['proto']
PYFLINK_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
DEFAULT_PYTHON_OUTPUT_PATH = os.path.join(PYFLINK_ROOT_PATH, 'fn_execution')
def generate_proto_files(force=True, output_dir=DEFAULT_PYTHON_OUTPUT_PATH):
try:
import grpc_tools # noqa # pylint: disable=unused-import
except ImportError:
warnings.warn('Installing grpcio-tools is recommended for development.')
proto_dirs = [os.path.join(PYFLINK_ROOT_PATH, path) for path in PROTO_PATHS]
proto_files = sum(
[glob.glob(os.path.join(d, '*.proto')) for d in proto_dirs], [])
out_dir = os.path.join(PYFLINK_ROOT_PATH, output_dir)
out_files = [path for path in glob.glob(os.path.join(out_dir, '*_pb2.py'))]
if out_files and not proto_files and not force:
# We have out_files but no protos; assume they're up to date.
# This is actually the common case (e.g. installation from an sdist).
logging.info('No proto files; using existing generated files.')
return
elif not out_files and not proto_files:
raise RuntimeError(
'No proto files found in %s.' % proto_dirs)
# Regenerate iff the proto files or this file are newer.
elif force or not out_files or len(out_files) < len(proto_files) or (
min(os.path.getmtime(path) for path in out_files)
<= max(os.path.getmtime(path)
for path in proto_files + [os.path.realpath(__file__)])):
try:
from grpc_tools import protoc
except ImportError:
if platform.system() == 'Windows':
# For Windows, grpcio-tools has to be installed manually.
raise RuntimeError(
'Cannot generate protos for Windows since grpcio-tools package is '
'not installed. Please install this package manually '
'using \'pip install "grpcio-tools>=1.29.0,<=1.46.3"\'.')
# Use a subprocess to avoid messing with this process' path and imports.
# Note that this requires a separate module from setup.py for Windows:
# https://docs.python.org/2/library/multiprocessing.html#windows
            p = multiprocessing.Process(
                target=_install_grpcio_tools_and_generate_proto_files,
                args=(force, output_dir))
p.start()
p.join()
if p.exitcode:
raise ValueError("Proto generation failed (see log for details).")
else:
_check_grpcio_tools_version()
logging.info('Regenerating out-of-date Python proto definitions.')
builtin_protos = pkg_resources.resource_filename('grpc_tools', '_proto')
args = (
[sys.executable] + # expecting to be called from command line
['--proto_path=%s' % builtin_protos] +
['--proto_path=%s' % d for d in proto_dirs] +
['--python_out=%s' % out_dir] +
proto_files)
ret_code = protoc.main(args)
if ret_code:
raise RuntimeError(
'Protoc returned non-zero status (see logs for details): '
'%s' % ret_code)
for output_file in os.listdir(output_dir):
if output_file.endswith('_pb2.py'):
_add_license_header(output_dir, output_file)
# Though wheels are available for grpcio-tools, setup_requires uses
# easy_install which doesn't understand them. This means that it is
# compiled from scratch (which is expensive as it compiles the full
# protoc compiler). Instead, we attempt to install a wheel in a temporary
# directory and add it to the path as needed.
# See https://github.com/pypa/setuptools/issues/377
def _install_grpcio_tools_and_generate_proto_files(force, output_dir):
install_path = os.path.join(PYFLINK_ROOT_PATH, '..', '.eggs', 'grpcio-wheels')
build_path = install_path + '-build'
if os.path.exists(build_path):
shutil.rmtree(build_path)
logging.warning('Installing grpcio-tools into %s', install_path)
try:
start = time.time()
        # The '--prefix' option is only supported for pip 8.0+, so fall back to
        # '--install-option' when the pip version is lower than 8.0.0.
pip_version = pkg_resources.get_distribution("pip").version
from pkg_resources import parse_version
if parse_version(pip_version) >= parse_version('8.0.0'):
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install',
'--prefix', install_path, '--build', build_path,
'--upgrade', GRPC_TOOLS, "-I"])
else:
subprocess.check_call(
[sys.executable, '-m', 'pip', 'install',
'--install-option', '--prefix=' + install_path, '--build', build_path,
'--upgrade', GRPC_TOOLS, "-I"])
from distutils.dist import Distribution
install_obj = Distribution().get_command_obj('install', create=True)
install_obj.prefix = install_path
install_obj.finalize_options()
logging.warning(
'Installing grpcio-tools took %0.2f seconds.', time.time() - start)
finally:
sys.stderr.flush()
shutil.rmtree(build_path, ignore_errors=True)
sys.path.append(install_obj.install_purelib)
pkg_resources.working_set.add_entry(install_obj.install_purelib)
if install_obj.install_purelib != install_obj.install_platlib:
sys.path.append(install_obj.install_platlib)
pkg_resources.working_set.add_entry(install_obj.install_platlib)
try:
generate_proto_files(force, output_dir)
finally:
sys.stderr.flush()
def _add_license_header(dir, file_name):
with open(os.path.join(dir, file_name), 'r') as original_file:
original_data = original_file.read()
tmp_file_name = file_name + '.tmp'
with open(os.path.join(dir, tmp_file_name), 'w') as tmp_file:
tmp_file.write(
'################################################################################\n'
'# Licensed to the Apache Software Foundation (ASF) under one\n'
'# or more contributor license agreements. See the NOTICE file\n'
'# distributed with this work for additional information\n'
'# regarding copyright ownership. The ASF licenses this file\n'
'# to you under the Apache License, Version 2.0 (the\n'
'# "License"); you may not use this file except in compliance\n'
'# with the License. You may obtain a copy of the License at\n'
'#\n'
'# http://www.apache.org/licenses/LICENSE-2.0\n'
'#\n'
'# Unless required by applicable law or agreed to in writing, software\n'
'# distributed under the License is distributed on an "AS IS" BASIS,\n'
'# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
'# See the License for the specific language governing permissions and\n'
'# limitations under the License.\n'
'################################################################################\n'
)
tmp_file.write(original_data)
if os.path.exists(os.path.join(dir, file_name)):
os.remove(os.path.join(dir, file_name))
os.rename(os.path.join(dir, tmp_file_name), os.path.join(dir, file_name))
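# --- Illustrative sketch (not part of the original module) ---------------------
# A minimal example of how generate_proto_files() might be driven directly, e.g.
# to force regeneration of the *_pb2.py stubs into a scratch directory. The helper
# name and the use of a temporary directory are assumptions made for illustration,
# not part of the Flink build.
def _example_force_proto_regeneration():
    import tempfile
    scratch_dir = tempfile.mkdtemp(prefix='pyflink-protos-')
    # force=True skips the mtime comparison against the .proto sources, so the
    # stubs are always regenerated (grpcio-tools must be importable or installable).
    generate_proto_files(force=True, output_dir=scratch_dir)
    return scratch_dir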
def _check_grpcio_tools_version():
version = pkg_resources.get_distribution("grpcio-tools").parsed_version
from pkg_resources import parse_version
if version < parse_version('1.29.0') or version > parse_version('1.46.3'):
raise RuntimeError(
"Version of grpcio-tools must be between 1.29.0 and 1.46.3, got %s" % version)
if __name__ == '__main__':
generate_proto_files()
| 9,238 | 46.137755 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/java_gateway.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import importlib
import os
import shlex
import shutil
import struct
import tempfile
import time
from logging import WARN
from threading import RLock
from py4j.java_gateway import (java_import, logger, JavaGateway, GatewayParameters,
CallbackServerParameters)
from pyflink.find_flink_home import _find_flink_home
from pyflink.pyflink_gateway_server import launch_gateway_server_process
from pyflink.util.exceptions import install_exception_handler, install_py4j_hooks
_gateway = None
_lock = RLock()
def is_launch_gateway_disabled():
if "PYFLINK_GATEWAY_DISABLED" in os.environ \
and os.environ["PYFLINK_GATEWAY_DISABLED"].lower() not in ["0", "false", ""]:
return True
else:
return False
def get_gateway():
# type: () -> JavaGateway
global _gateway
global _lock
with _lock:
if _gateway is None:
# Set the level to WARN to mute the noisy INFO level logs
logger.level = WARN
            # reuse the Java gateway if it is already running
if 'PYFLINK_GATEWAY_PORT' in os.environ:
gateway_port = int(os.environ['PYFLINK_GATEWAY_PORT'])
gateway_param = GatewayParameters(port=gateway_port, auto_convert=True)
_gateway = JavaGateway(
gateway_parameters=gateway_param,
callback_server_parameters=CallbackServerParameters(
port=0, daemonize=True, daemonize_connections=True))
else:
_gateway = launch_gateway()
callback_server = _gateway.get_callback_server()
callback_server_listening_address = callback_server.get_listening_address()
callback_server_listening_port = callback_server.get_listening_port()
_gateway.jvm.org.apache.flink.client.python.PythonEnvUtils.resetCallbackClient(
_gateway.java_gateway_server,
callback_server_listening_address,
callback_server_listening_port)
# import the flink view
import_flink_view(_gateway)
install_exception_handler()
install_py4j_hooks()
_gateway.entry_point.put("PythonFunctionFactory", PythonFunctionFactory())
_gateway.entry_point.put("Watchdog", Watchdog())
return _gateway
def launch_gateway():
# type: () -> JavaGateway
"""
    Launch the JVM gateway.
"""
if is_launch_gateway_disabled():
        raise Exception("It's launching the PythonGatewayServer during Python UDF execution "
                        "which is unexpected. It usually happens when the job code is "
                        "at the top level of the Python script file and is not enclosed in an "
                        "`if __name__ == '__main__'` block.")
args = ['-c', 'org.apache.flink.client.python.PythonGatewayServer']
submit_args = os.environ.get("SUBMIT_ARGS", "local")
args += shlex.split(submit_args)
# Create a temporary directory where the gateway server should write the connection information.
conn_info_dir = tempfile.mkdtemp()
try:
fd, conn_info_file = tempfile.mkstemp(dir=conn_info_dir)
os.close(fd)
os.unlink(conn_info_file)
_find_flink_home()
env = dict(os.environ)
env["_PYFLINK_CONN_INFO_PATH"] = conn_info_file
p = launch_gateway_server_process(env, args)
while not p.poll() and not os.path.isfile(conn_info_file):
time.sleep(0.1)
if not os.path.isfile(conn_info_file):
stderr_info = p.stderr.read().decode('utf-8')
raise RuntimeError(
"Java gateway process exited before sending its port number.\nStderr:\n"
+ stderr_info
)
with open(conn_info_file, "rb") as info:
gateway_port = struct.unpack("!I", info.read(4))[0]
finally:
shutil.rmtree(conn_info_dir)
# Connect to the gateway
gateway = JavaGateway(
gateway_parameters=GatewayParameters(port=gateway_port, auto_convert=True),
callback_server_parameters=CallbackServerParameters(
port=0, daemonize=True, daemonize_connections=True))
return gateway
def import_flink_view(gateway):
"""
    Import the classes used by PyFlink.
    :param gateway: gateway connected to the JavaGatewayServer
"""
# Import the classes used by PyFlink
java_import(gateway.jvm, "org.apache.flink.table.api.*")
java_import(gateway.jvm, "org.apache.flink.table.api.config.*")
java_import(gateway.jvm, "org.apache.flink.table.api.java.*")
java_import(gateway.jvm, "org.apache.flink.table.api.bridge.java.*")
java_import(gateway.jvm, "org.apache.flink.table.api.dataview.*")
java_import(gateway.jvm, "org.apache.flink.table.catalog.*")
java_import(gateway.jvm, "org.apache.flink.table.descriptors.*")
java_import(gateway.jvm, "org.apache.flink.table.descriptors.python.*")
java_import(gateway.jvm, "org.apache.flink.table.expressions.*")
java_import(gateway.jvm, "org.apache.flink.table.sources.*")
java_import(gateway.jvm, "org.apache.flink.table.sinks.*")
java_import(gateway.jvm, "org.apache.flink.table.sources.*")
java_import(gateway.jvm, "org.apache.flink.table.types.*")
java_import(gateway.jvm, "org.apache.flink.table.types.logical.*")
java_import(gateway.jvm, "org.apache.flink.table.util.python.*")
java_import(gateway.jvm, "org.apache.flink.api.common.python.*")
java_import(gateway.jvm, "org.apache.flink.api.common.typeinfo.TypeInformation")
java_import(gateway.jvm, "org.apache.flink.api.common.typeinfo.Types")
java_import(gateway.jvm, "org.apache.flink.api.java.ExecutionEnvironment")
java_import(gateway.jvm,
"org.apache.flink.streaming.api.environment.StreamExecutionEnvironment")
java_import(gateway.jvm, "org.apache.flink.api.common.restartstrategy.RestartStrategies")
java_import(gateway.jvm, "org.apache.flink.python.util.PythonDependencyUtils")
java_import(gateway.jvm, "org.apache.flink.python.PythonOptions")
java_import(gateway.jvm, "org.apache.flink.client.python.PythonGatewayServer")
java_import(gateway.jvm, "org.apache.flink.streaming.api.functions.python.*")
java_import(gateway.jvm, "org.apache.flink.streaming.api.operators.python.process.*")
java_import(gateway.jvm, "org.apache.flink.streaming.api.operators.python.embedded.*")
java_import(gateway.jvm, "org.apache.flink.streaming.api.typeinfo.python.*")
class PythonFunctionFactory(object):
"""
Used to create PythonFunction objects for Java jobs.
"""
def getPythonFunction(self, moduleName, objectName):
udf_wrapper = getattr(importlib.import_module(moduleName), objectName)
return udf_wrapper._java_user_defined_function()
class Java:
implements = ["org.apache.flink.client.python.PythonFunctionFactory"]
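# --- Illustrative sketch (not part of the original module) ---------------------
# Demonstrates the lookup contract that PythonFunctionFactory.getPythonFunction
# relies on: ``moduleName`` must be importable and ``objectName`` must expose a
# ``_java_user_defined_function()`` method. The module name "demo_udfs", the
# attribute "add_one" and the returned placeholder are made up for illustration.
def _example_python_function_lookup():
    import sys
    import types
    demo_module = types.ModuleType("demo_udfs")
    class _FakeUdf(object):
        def _java_user_defined_function(self):
            return "placeholder-for-a-java-PythonFunction"
    demo_module.add_one = _FakeUdf()
    sys.modules["demo_udfs"] = demo_module
    # Mirrors the body of PythonFunctionFactory.getPythonFunction above.
    udf_wrapper = getattr(importlib.import_module("demo_udfs"), "add_one")
    return udf_wrapper._java_user_defined_function()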
class Watchdog(object):
"""
Used to provide to Java side to check whether its parent process is alive.
"""
def ping(self):
time.sleep(10)
return True
class Java:
implements = ["org.apache.flink.client.python.PythonGatewayServer$Watchdog"]
| 8,189 | 40.785714 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/shell.py
|
#!/usr/bin/env python
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import platform
import sys
from pyflink.common import *
from pyflink.datastream import *
from pyflink.table import *
from pyflink.table.catalog import *
from pyflink.table.descriptors import *
from pyflink.table.window import *
from pyflink.metrics import *
utf8_out = open(sys.stdout.fileno(), mode='w', encoding='utf8', buffering=1)
print("Using Python version %s (%s, %s)" % (
platform.python_version(),
platform.python_build()[0],
platform.python_build()[1]))
welcome_msg = u'''
\u2592\u2593\u2588\u2588\u2593\u2588\u2588\u2592
\u2593\u2588\u2588\u2588\u2588\u2592\u2592\u2588\u2593\u2592\u2593\u2588\u2588\u2588\u2593\u2592
\u2593\u2588\u2588\u2588\u2593\u2591\u2591 \u2592\u2592\u2592\u2593\u2588\u2588\u2592 \u2592
\u2591\u2588\u2588\u2592 \u2592\u2592\u2593\u2593\u2588\u2593\u2593\u2592\u2591 \u2592\u2588\u2588\u2588\u2588
\u2588\u2588\u2592 \u2591\u2592\u2593\u2588\u2588\u2588\u2592 \u2592\u2588\u2592\u2588\u2592
\u2591\u2593\u2588 \u2588\u2588\u2588 \u2593\u2591\u2592\u2588\u2588
\u2593\u2588 \u2592\u2592\u2592\u2592\u2592\u2593\u2588\u2588\u2593\u2591\u2592\u2591\u2593\u2593\u2588
\u2588\u2591 \u2588 \u2592\u2592\u2591 \u2588\u2588\u2588\u2593\u2593\u2588 \u2592\u2588\u2592\u2592\u2592
\u2588\u2588\u2588\u2588\u2591 \u2592\u2593\u2588\u2593 \u2588\u2588\u2592\u2592\u2592 \u2593\u2588\u2588\u2588\u2592
\u2591\u2592\u2588\u2593\u2593\u2588\u2588 \u2593\u2588\u2592 \u2593\u2588\u2592\u2593\u2588\u2588\u2593 \u2591\u2588\u2591
\u2593\u2591\u2592\u2593\u2588\u2588\u2588\u2588\u2592 \u2588\u2588 \u2592\u2588 \u2588\u2593\u2591\u2592\u2588\u2592\u2591\u2592\u2588\u2592
\u2588\u2588\u2588\u2593\u2591\u2588\u2588\u2593 \u2593\u2588 \u2588 \u2588\u2593 \u2592\u2593\u2588\u2593\u2593\u2588\u2592
\u2591\u2588\u2588\u2593 \u2591\u2588\u2591 \u2588 \u2588\u2592 \u2592\u2588\u2588\u2588\u2588\u2588\u2593\u2592 \u2588\u2588\u2593\u2591\u2592
\u2588\u2588\u2588\u2591 \u2591 \u2588\u2591 \u2593 \u2591\u2588 \u2588\u2588\u2588\u2588\u2588\u2592\u2591\u2591 \u2591\u2588\u2591\u2593 \u2593\u2591
\u2588\u2588\u2593\u2588 \u2592\u2592\u2593\u2592 \u2593\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2593\u2591 \u2592\u2588\u2592 \u2592\u2593 \u2593\u2588\u2588\u2593
\u2592\u2588\u2588\u2593 \u2593\u2588 \u2588\u2593\u2588 \u2591\u2592\u2588\u2588\u2588\u2588\u2588\u2593\u2593\u2592\u2591 \u2588\u2588\u2592\u2592 \u2588 \u2592 \u2593\u2588\u2592
\u2593\u2588\u2593 \u2593\u2588 \u2588\u2588\u2593 \u2591\u2593\u2593\u2593\u2593\u2593\u2593\u2593\u2592 \u2592\u2588\u2588\u2593 \u2591\u2588\u2592
\u2593\u2588 \u2588 \u2593\u2588\u2588\u2588\u2593\u2592\u2591 \u2591\u2593\u2593\u2593\u2588\u2588\u2588\u2593 \u2591\u2592\u2591 \u2593\u2588
\u2588\u2588\u2593 \u2588\u2588\u2592 \u2591\u2592\u2593\u2593\u2588\u2588\u2588\u2593\u2593\u2593\u2593\u2593\u2588\u2588\u2588\u2588\u2588\u2588\u2593\u2592 \u2593\u2588\u2588\u2588 \u2588
\u2593\u2588\u2588\u2588\u2592 \u2588\u2588\u2588 \u2591\u2593\u2593\u2592\u2591\u2591 \u2591\u2593\u2588\u2588\u2588\u2588\u2593\u2591 \u2591\u2592\u2593\u2592 \u2588\u2593
\u2588\u2593\u2592\u2592\u2593\u2593\u2588\u2588 \u2591\u2592\u2592\u2591\u2591\u2591\u2592\u2592\u2592\u2592\u2593\u2588\u2588\u2593\u2591 \u2588\u2593
\u2588\u2588 \u2593\u2591\u2592\u2588 \u2593\u2593\u2593\u2593\u2592\u2591\u2591 \u2592\u2588\u2593 \u2592\u2593\u2593\u2588\u2588\u2593 \u2593\u2592 \u2592\u2592\u2593
\u2593\u2588\u2593 \u2593\u2592\u2588 \u2588\u2593\u2591 \u2591\u2592\u2593\u2593\u2588\u2588\u2592 \u2591\u2593\u2588\u2592 \u2592\u2592\u2592\u2591\u2592\u2592\u2593\u2588\u2588\u2588\u2588\u2588\u2592
\u2588\u2588\u2591 \u2593\u2588\u2592\u2588\u2592 \u2592\u2593\u2593\u2592 \u2593\u2588 \u2588\u2591 \u2591\u2591\u2591\u2591 \u2591\u2588\u2592
\u2593\u2588 \u2592\u2588\u2593 \u2591 \u2588\u2591 \u2592\u2588 \u2588\u2593
\u2588\u2593 \u2588\u2588 \u2588\u2591 \u2593\u2593 \u2592\u2588\u2593\u2593\u2593\u2592\u2588\u2591
\u2588\u2593 \u2591\u2593\u2588\u2588\u2591 \u2593\u2592 \u2593\u2588\u2593\u2592\u2591\u2591\u2591\u2592\u2593\u2588\u2591 \u2592\u2588
\u2588\u2588 \u2593\u2588\u2593\u2591 \u2592 \u2591\u2592\u2588\u2592\u2588\u2588\u2592 \u2593\u2593
\u2593\u2588\u2592 \u2592\u2588\u2593\u2592\u2591 \u2592\u2592 \u2588\u2592\u2588\u2593\u2592\u2592\u2591\u2591\u2592\u2588\u2588
\u2591\u2588\u2588\u2592 \u2592\u2593\u2593\u2592 \u2593\u2588\u2588\u2593\u2592\u2588\u2592 \u2591\u2593\u2593\u2593\u2593\u2592\u2588\u2593
\u2591\u2593\u2588\u2588\u2592 \u2593\u2591 \u2592\u2588\u2593\u2588 \u2591\u2591\u2592\u2592\u2592
\u2592\u2593\u2593\u2593\u2593\u2593\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2592\u2591\u2591\u2593\u2593 \u2593\u2591\u2592\u2588\u2591
F L I N K - P Y T H O N - S H E L L
NOTE: Use the prebound Table Environment to implement batch or streaming Table programs.
Streaming - Use 's_env' and 'st_env' variables
```
import os
import shutil
import tempfile
sink_path = tempfile.gettempdir() + '/streaming.csv'
if os.path.exists(sink_path):
if os.path.isfile(sink_path):
os.remove(sink_path)
else:
shutil.rmtree(sink_path)
s_env.set_parallelism(1)
t = st_env.from_elements([(1, 'hi', 'hello'), (2, 'hi', 'hello')], ['a', 'b', 'c'])
st_env.create_temporary_table("stream_sink", TableDescriptor.for_connector("filesystem")
.schema(Schema.new_builder()
.column("a", DataTypes.BIGINT())
.column("b", DataTypes.STRING())
.column("c", DataTypes.STRING())
.build())
.option("path", sink_path)
.format(FormatDescriptor.for_format("csv")
.option("field-delimiter", ",")
.build())
.build())
t.select(col('a') + 1, col('b'), col('c')).insert_into("stream_sink")
st_env.execute("stream_job")
# show the results
with open(os.path.join(sink_path, os.listdir(sink_path)[0]), 'r') as f:
print(f.read())
```
'''
utf8_out.write(welcome_msg)
s_env = StreamExecutionEnvironment.get_execution_environment()
st_env = StreamTableEnvironment.create(s_env)
| 8,093 | 67.59322 | 242 |
py
|
flink
|
flink-master/flink-python/pyflink/version.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
The pyflink version will be consistent with the flink version and follow the PEP440.
.. seealso:: https://www.python.org/dev/peps/pep-0440
"""
__version__ = "1.18.dev0"
| 1,132 | 46.208333 | 84 |
py
|
flink
|
flink-master/flink-python/pyflink/find_flink_home.py
|
#!/usr/bin/env python
#################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import glob
import logging
import os
import sys
def _is_flink_home(path):
flink_script_file = path + "/bin/flink"
if len(glob.glob(flink_script_file)) > 0:
return True
else:
return False
def _is_apache_flink_libraries_home(path):
flink_dist_jar_file = path + "/lib/flink-dist*.jar"
if len(glob.glob(flink_dist_jar_file)) > 0:
return True
else:
return False
def _find_flink_home():
"""
Find the FLINK_HOME.
"""
# If the environment has set FLINK_HOME, trust it.
if 'FLINK_HOME' in os.environ:
return os.environ['FLINK_HOME']
else:
try:
current_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
flink_root_dir = os.path.abspath(current_dir + "/../../")
build_target = glob.glob(flink_root_dir + "/flink-dist/target/flink-*-bin/flink-*")
if len(build_target) > 0 and _is_flink_home(build_target[0]):
os.environ['FLINK_HOME'] = build_target[0]
return build_target[0]
FLINK_HOME = None
for module_home in __import__('pyflink').__path__:
if _is_apache_flink_libraries_home(module_home):
os.environ['FLINK_LIB_DIR'] = os.path.join(module_home, 'lib')
os.environ['FLINK_PLUGINS_DIR'] = os.path.join(module_home, 'plugins')
os.environ['FLINK_OPT_DIR'] = os.path.join(module_home, 'opt')
if _is_flink_home(module_home):
FLINK_HOME = module_home
if FLINK_HOME is not None:
os.environ['FLINK_HOME'] = FLINK_HOME
return FLINK_HOME
except Exception:
pass
    logging.error("Could not find a valid FLINK_HOME (Flink distribution directory) "
"in current environment.")
sys.exit(-1)
def _find_flink_source_root():
"""
Find the flink source root directory.
"""
try:
return os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + "/../../")
except Exception:
pass
logging.error("Could not find valid flink source root directory in current environment.")
sys.exit(-1)
if __name__ == "__main__":
print(_find_flink_home())
| 3,270 | 35.752809 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
import sys
from functools import wraps
__path__ = __import__("pkgutil").extend_path(__path__, __name__) # type: ignore
if sys.version_info < (3, 6):
raise RuntimeError(
'Python versions prior to 3.6 are not supported for PyFlink [' +
str(sys.version_info) + '].')
def keyword(func):
"""
    A decorator that forces keyword-argument usage and stores the actual
    input keyword arguments in `_input_kwargs`.
"""
@wraps(func)
def wrapper(self, **kwargs):
self._input_kwargs = kwargs
return func(self, **kwargs)
return wrapper
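# --- Illustrative sketch (not part of the original module) ---------------------
# Shows how the ``keyword`` decorator behaves on a toy class: positional calls are
# rejected by the wrapper's ``**kwargs``-only signature, and the keyword arguments
# that were passed are captured in ``_input_kwargs``. The class below is made up
# purely for illustration.
class _KeywordExample(object):
    @keyword
    def __init__(self, host=None, port=None):
        self.host = host
        self.port = port
# _KeywordExample(host="localhost", port=8081)._input_kwargs
#   -> {'host': 'localhost', 'port': 8081}
# _KeywordExample("localhost", 8081) raises TypeError (positional arguments rejected).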
def add_version_doc(f, version):
"""
    Annotates a function's docstring with the version in which the function was added.
"""
import re
indent_p = re.compile(r'\n( *)[^\n ]')
original_doc = f.__doc__ or ""
indents = indent_p.findall(original_doc)
indent = ' ' * (min(len(indent) for indent in indents) if indents else 0)
f.__doc__ = original_doc.rstrip() + "\n\n%s.. versionadded:: %s" % (indent, version)
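# --- Illustrative sketch (not part of the original module) ---------------------
# ``add_version_doc`` appends a ``.. versionadded::`` directive to a function's
# docstring, matching the docstring's existing indentation. The toy function and
# the version string "1.18.0" below are made up for the demonstration.
def _versioned_example():
    """
    A toy function used to demonstrate add_version_doc.
    """
    return 42
add_version_doc(_versioned_example, "1.18.0")
# _versioned_example.__doc__ now ends with:
#     .. versionadded:: 1.18.0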
| 1,969 | 36.169811 | 88 |
py
|
flink
|
flink-master/flink-python/pyflink/pyflink_callback_server.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import time
from pyflink.java_gateway import get_gateway
if __name__ == '__main__':
# just a daemon process used to serve the rpc call from Java.
gateway = get_gateway()
watchdog = gateway.jvm.org.apache.flink.client.python.PythonGatewayServer.watchdog
try:
while watchdog.ping():
time.sleep(1)
finally:
get_gateway().close()
exit(0)
| 1,351 | 39.969697 | 86 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/checkpointing_mode.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from pyflink.java_gateway import get_gateway
__all__ = ['CheckpointingMode']
class CheckpointingMode(Enum):
"""
The checkpointing mode defines what consistency guarantees the system gives in the presence of
failures.
When checkpointing is activated, the data streams are replayed such that lost parts of the
processing are repeated. For stateful operations and functions, the checkpointing mode defines
whether the system draws checkpoints such that a recovery behaves as if the operators/functions
see each record "exactly once" (:data:`CheckpointingMode.EXACTLY_ONCE`), or whether the
checkpoints are drawn in a simpler fashion that typically encounters some duplicates upon
recovery (:data:`CheckpointingMode.AT_LEAST_ONCE`)
:data:`EXACTLY_ONCE`:
Sets the checkpointing mode to "exactly once". This mode means that the system will
checkpoint the operator and user function state in such a way that, upon recovery,
every record will be reflected exactly once in the operator state.
For example, if a user function counts the number of elements in a stream,
this number will consistently be equal to the number of actual elements in the stream,
regardless of failures and recovery.
Note that this does not mean that each record flows through the streaming data flow
only once. It means that upon recovery, the state of operators/functions is restored such
    that the resumed data streams pick up exactly after the last modification to the state.
Note that this mode does not guarantee exactly-once behavior in the interaction with
external systems (only state in Flink's operators and user functions). The reason for that
is that a certain level of "collaboration" is required between two systems to achieve
exactly-once guarantees. However, for certain systems, connectors can be written that
facilitate this collaboration.
This mode sustains high throughput. Depending on the data flow graph and operations,
this mode may increase the record latency, because operators need to align their input
streams, in order to create a consistent snapshot point. The latency increase for simple
dataflows (no repartitioning) is negligible. For simple dataflows with repartitioning, the
average latency remains small, but the slowest records typically have an increased latency.
:data:`AT_LEAST_ONCE`:
Sets the checkpointing mode to "at least once". This mode means that the system will
checkpoint the operator and user function state in a simpler way. Upon failure and recovery,
some records may be reflected multiple times in the operator state.
For example, if a user function counts the number of elements in a stream,
    this number will be equal to, or larger than, the actual number of elements in the stream,
    in the presence of failure and recovery.
    This mode has minimal impact on latency and may be preferable in very-low latency
    scenarios, where a sustained very-low latency (such as a few milliseconds) is needed,
and where occasional duplicate messages (on recovery) do not matter.
"""
EXACTLY_ONCE = 0
AT_LEAST_ONCE = 1
@staticmethod
def _from_j_checkpointing_mode(j_checkpointing_mode) -> 'CheckpointingMode':
return CheckpointingMode[j_checkpointing_mode.name()]
def _to_j_checkpointing_mode(self):
gateway = get_gateway()
JCheckpointingMode = \
gateway.jvm.org.apache.flink.streaming.api.CheckpointingMode
return getattr(JCheckpointingMode, self.name)
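# --- Illustrative sketch (not part of the original module) ---------------------
# Basic enum usage; no JVM is required for these operations. In a job, the chosen
# mode is typically handed to the checkpoint configuration of the
# StreamExecutionEnvironment when enabling checkpointing, which converts it to its
# Java counterpart via _to_j_checkpointing_mode().
def _example_checkpointing_mode_usage():
    mode = CheckpointingMode.EXACTLY_ONCE
    assert mode.name == 'EXACTLY_ONCE'
    # Enum members can also be looked up by name, mirroring _from_j_checkpointing_mode.
    assert CheckpointingMode['AT_LEAST_ONCE'] is CheckpointingMode.AT_LEAST_ONCE
    return mode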
| 4,575 | 49.844444 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/window.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import math
from abc import ABC, abstractmethod
from enum import Enum
from io import BytesIO
from typing import TypeVar, Generic, Iterable, Collection, Any, cast, Optional
from pyflink.common import Time, Types
from pyflink.common.constants import MAX_LONG_VALUE, MIN_LONG_VALUE
from pyflink.common.serializer import TypeSerializer
from pyflink.datastream.functions import RuntimeContext, InternalWindowFunction, ReduceFunction
from pyflink.datastream.output_tag import OutputTag
from pyflink.datastream.state import StateDescriptor, ReducingStateDescriptor, \
ValueStateDescriptor, State, ReducingState
from pyflink.metrics import MetricGroup
__all__ = ['Window',
'TimeWindow',
'CountWindow',
'GlobalWindow',
'WindowAssigner',
'TumblingProcessingTimeWindows',
'TumblingEventTimeWindows',
'SlidingProcessingTimeWindows',
'SlidingEventTimeWindows',
'ProcessingTimeSessionWindows',
'EventTimeSessionWindows',
'DynamicProcessingTimeSessionWindows',
'DynamicEventTimeSessionWindows',
'GlobalWindows',
'MergingWindowAssigner',
'CountTumblingWindowAssigner',
'CountSlidingWindowAssigner',
'TriggerResult',
'Trigger',
'EventTimeTrigger',
'ProcessingTimeTrigger',
'ContinuousEventTimeTrigger',
'ContinuousProcessingTimeTrigger',
'NeverTrigger',
'PurgingTrigger',
'CountTrigger',
'TimeWindowSerializer',
'CountWindowSerializer',
'GlobalWindowSerializer',
'SessionWindowTimeGapExtractor']
def long_to_int_with_bit_mixing(x: int) -> int:
x = (x ^ (x >> 30)) * 0xbf58476d1ce4e5b9
x = (x ^ (x >> 27)) * 0x94d049bb133111eb
x = x ^ (x >> 31)
return x
def mod_inverse(x: int) -> int:
inverse = x * x * x
inverse *= 2 - x * inverse
inverse *= 2 - x * inverse
inverse *= 2 - x * inverse
return inverse
class Window(ABC):
"""
Window is a grouping of elements into finite buckets. Windows have a maximum timestamp
which means that, at some point, all elements that go into one window will have arrived.
"""
@abstractmethod
def max_timestamp(self) -> int:
pass
class TimeWindow(Window):
"""
Window that represents a time interval from start (inclusive) to end (exclusive).
"""
def __init__(self, start: int, end: int):
super(TimeWindow, self).__init__()
self.start = start
self.end = end
def max_timestamp(self) -> int:
return self.end - 1
def intersects(self, other: 'TimeWindow') -> bool:
"""
Returns True if this window intersects the given window.
"""
return self.start <= other.end and self.end >= other.start
def cover(self, other: 'TimeWindow') -> 'TimeWindow':
"""
        Returns the minimal window that covers both this window and the given window.
"""
return TimeWindow(min(self.start, other.start), max(self.end, other.end))
@staticmethod
def get_window_start_with_offset(timestamp: int, offset: int, window_size: int):
"""
Method to get the window start for a timestamp.
:param timestamp: epoch millisecond to get the window start.
:param offset: The offset which window start would be shifted by.
:param window_size: The size of the generated windows.
:return: window start
"""
return timestamp - (timestamp - offset + window_size) % window_size
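    # Worked example (illustrative, not part of the original file): with
    # timestamp=7000, offset=100 and window_size=5000 the formula yields
    # 7000 - (7000 - 100 + 5000) % 5000 = 7000 - 1900 = 5100, i.e. the element
    # falls into the window [5100, 10100).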
@staticmethod
def merge_windows(windows: Iterable['TimeWindow'],
callback: 'MergingWindowAssigner.MergeCallback[TimeWindow]') -> None:
"""
Merge overlapping :class`TimeWindow`.
"""
sorted_windows = list(windows)
sorted_windows.sort()
merged = []
current_merge = None
current_merge_set = set()
for candidate in sorted_windows:
if current_merge is None:
current_merge = candidate
current_merge_set.add(candidate)
elif current_merge.intersects(candidate):
current_merge = current_merge.cover(candidate)
current_merge_set.add(candidate)
else:
merged.append((current_merge, current_merge_set))
current_merge = candidate
current_merge_set = set()
current_merge_set.add(candidate)
if current_merge is not None:
merged.append((current_merge, current_merge_set))
for merge_key, merge_set in merged:
if len(merge_set) > 1:
callback.merge(merge_set, merge_key)
def __hash__(self):
return self.start + mod_inverse((self.end << 1) + 1)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.end == other.end \
and self.start == other.start
def __lt__(self, other: 'TimeWindow'):
if not isinstance(other, TimeWindow):
raise Exception("Does not support comparison with non-TimeWindow %s" % other)
return self.start == other.start and self.end < other.end or self.start < other.start
def __le__(self, other: 'TimeWindow'):
        return self.__lt__(other) or self.__eq__(other)
def __repr__(self):
return "TimeWindow(start={}, end={})".format(self.start, self.end)
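# --- Illustrative sketch (not part of the original module) ---------------------
# Demonstrates TimeWindow.merge_windows with a duck-typed callback (a real
# assigner would implement MergingWindowAssigner.MergeCallback). The overlapping
# windows [0, 10) and [5, 15) are merged into [0, 15); [20, 30) does not overlap
# anything and therefore produces no callback invocation.
def _example_merge_time_windows():
    merges = []
    class _CollectMerges(object):
        def merge(self, to_be_merged, merge_result):
            merges.append((set(to_be_merged), merge_result))
    windows = [TimeWindow(0, 10), TimeWindow(5, 15), TimeWindow(20, 30)]
    TimeWindow.merge_windows(windows, _CollectMerges())
    # merges == [({TimeWindow(0, 10), TimeWindow(5, 15)}, TimeWindow(0, 15))]
    return merges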
class CountWindow(Window):
"""
A Window that represents a count window. For each count window, we will assign a unique
    id. Thus this CountWindow can act as the namespace part in state. We can attach data to each
different CountWindow.
"""
def __init__(self, id: int):
super(CountWindow, self).__init__()
self.id = id
def max_timestamp(self) -> int:
return MAX_LONG_VALUE
def __hash__(self):
return long_to_int_with_bit_mixing(self.id)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.id == other.id
def __repr__(self):
return "CountWindow(id={})".format(self.id)
class GlobalWindow(Window):
"""
    The default window into which all data is placed (via GlobalWindows).
"""
def __init__(self):
super(GlobalWindow, self).__init__()
@staticmethod
def get() -> 'GlobalWindow':
return GlobalWindow()
def max_timestamp(self) -> int:
return MAX_LONG_VALUE
def __eq__(self, other):
return self.__class__ == other.__class__
def __hash__(self):
return 0
def __repr__(self):
return "GlobalWindow"
class TimeWindowSerializer(TypeSerializer[TimeWindow]):
"""
The serializer used to write the TimeWindow type.
"""
def __init__(self):
self._underlying_coder = None
def serialize(self, element: TimeWindow, stream: BytesIO) -> None:
if self._underlying_coder is None:
self._underlying_coder = self._get_coder().get_impl()
bytes_data = self._underlying_coder.encode(element)
stream.write(bytes_data)
def deserialize(self, stream: BytesIO) -> TimeWindow:
if self._underlying_coder is None:
self._underlying_coder = self._get_coder().get_impl()
bytes_data: bytes = stream.read(16)
return self._underlying_coder.decode(bytes_data)
def _get_coder(self):
from pyflink.fn_execution import coders
return coders.TimeWindowCoder()
class CountWindowSerializer(TypeSerializer[CountWindow]):
def __init__(self):
self._underlying_coder = None
def serialize(self, element: CountWindow, stream: BytesIO) -> None:
if self._underlying_coder is None:
self._underlying_coder = self._get_coder().get_impl()
bytes_data = self._underlying_coder.encode(element)
stream.write(bytes_data)
def deserialize(self, stream: BytesIO) -> CountWindow:
if self._underlying_coder is None:
self._underlying_coder = self._get_coder().get_impl()
bytes_data = stream.read(8)
return self._underlying_coder.decode(bytes_data)
def _get_coder(self):
from pyflink.fn_execution import coders
return coders.CountWindowCoder()
class GlobalWindowSerializer(TypeSerializer[GlobalWindow]):
"""
A TypeSerializer for GlobalWindow.
"""
def __init__(self):
self._underlying_coder = None
def serialize(self, element: GlobalWindow, stream: BytesIO) -> None:
if self._underlying_coder is None:
self._underlying_coder = self._get_coder().get_impl()
bytes_data = self._underlying_coder.encode(element)
stream.write(bytes_data)
def deserialize(self, stream: BytesIO) -> GlobalWindow:
if self._underlying_coder is None:
self._underlying_coder = self._get_coder().get_impl()
bytes_data = stream.read(8)
return self._underlying_coder.decode(bytes_data)
def _get_coder(self):
from pyflink.fn_execution import coders
return coders.GlobalWindowCoder()
T = TypeVar('T')
W = TypeVar('W')
W2 = TypeVar('W2')
IN = TypeVar('IN')
OUT = TypeVar('OUT')
KEY = TypeVar('KEY')
class TriggerResult(Enum):
"""
Result type for trigger methods. This determines what happens with the window, for example
whether the window function should be called, or the window should be discarded.
If a :class:`Trigger` returns TriggerResult.FIRE or TriggerResult.FIRE_AND_PURGE but the window
does not contain any data the window function will not be invoked, i.e. no data will be produced
for the window.
- CONTINUE: No action is taken on the window.
- FIRE_AND_PURGE: Evaluates the window function and emits the 'window result'.
- FIRE: On FIRE, the window is evaluated and results are emitted. The window is not purged
though, all elements are retained.
- PURGE: All elements in the window are cleared and the window is discarded, without
evaluating the window function or emitting any elements.
"""
CONTINUE = (False, False)
FIRE_AND_PURGE = (True, True)
FIRE = (True, False)
PURGE = (False, True)
def is_fire(self) -> bool:
return self.value[0]
def is_purge(self) -> bool:
return self.value[1]
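# --- Illustrative sketch (not part of the original module) ---------------------
# The two booleans carried by each TriggerResult member:
#   CONTINUE        -> is_fire() False, is_purge() False
#   FIRE            -> is_fire() True,  is_purge() False
#   PURGE           -> is_fire() False, is_purge() True
#   FIRE_AND_PURGE  -> is_fire() True,  is_purge() True
def _example_trigger_result_flags():
    assert TriggerResult.FIRE.is_fire() and not TriggerResult.FIRE.is_purge()
    assert TriggerResult.FIRE_AND_PURGE.is_fire() and TriggerResult.FIRE_AND_PURGE.is_purge()
    assert not TriggerResult.CONTINUE.is_fire() and not TriggerResult.CONTINUE.is_purge()
    assert not TriggerResult.PURGE.is_fire() and TriggerResult.PURGE.is_purge()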
class Trigger(ABC, Generic[T, W]):
"""
A Trigger determines when a pane of a window should be evaluated to emit the results for that
part of the window.
A pane is the bucket of elements that have the same key (assigned by the KeySelector) and same
Window. An element can be in multiple panes if it was assigned to multiple windows by the
WindowAssigner. These panes all have their own instance of the Trigger.
Triggers must not maintain state internally since they can be re-created or reused for different
keys. All necessary state should be persisted using the state abstraction available on the
TriggerContext.
When used with a MergingWindowAssigner the Trigger must return true from :func:`can_merge` and
    :func:`on_merge` must be properly implemented.
"""
class TriggerContext(ABC):
"""
A context object that is given to :class:`Trigger` methods to allow them to register timer
callbacks and deal with state.
"""
@abstractmethod
def get_current_processing_time(self) -> int:
"""
:return: The current processing time.
"""
pass
@abstractmethod
def get_metric_group(self) -> MetricGroup:
"""
Returns the metric group for this :class:`Trigger`. This is the same metric group that
would be returned from
            :func:`~pyflink.datastream.functions.RuntimeContext.get_metric_group` in a user function.
:return: The metric group.
"""
pass
@abstractmethod
def get_current_watermark(self) -> int:
"""
:return: The current watermark time.
"""
pass
@abstractmethod
def register_processing_time_timer(self, time: int) -> None:
"""
Register a system time callback. When the current system time passes the specified time
:func:`~Trigger.on_processing_time` is called with the time specified here.
:param time: The time at which to invoke :func:`~Trigger.on_processing_time`.
"""
pass
@abstractmethod
def register_event_time_timer(self, time: int) -> None:
"""
Register an event-time callback. When the current watermark passes the specified time
:func:`~Trigger.on_event_time` is called with the time specified here.
:param time: The watermark at which to invoke :func:`~Trigger.on_event_time`.
"""
pass
@abstractmethod
def delete_processing_time_timer(self, time: int) -> None:
"""
Delete the processing time trigger for the given time.
"""
pass
@abstractmethod
def delete_event_time_timer(self, time: int) -> None:
"""
Delete the event-time trigger for the given time.
"""
pass
@abstractmethod
def get_partitioned_state(self, state_descriptor: StateDescriptor) -> State:
"""
Retrieves a :class:`State` object that can be used to interact with fault-tolerant state
that is scoped to the window and key of the current trigger invocation.
:param state_descriptor: The StateDescriptor that contains the name and type of the
state that is being accessed.
:return: The partitioned state object.
"""
pass
class OnMergeContext(TriggerContext):
"""
Extension of :class:`TriggerContext` that is given to :func:`~Trigger.on_merge`.
"""
@abstractmethod
def merge_partitioned_state(self, state_descriptor: StateDescriptor) -> None:
pass
@abstractmethod
def on_element(self,
element: T,
timestamp: int,
window: W,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
"""
Called for every element that gets added to a pane. The result of this will determine
whether the pane is evaluated to emit results.
:param element: The element that arrived.
:param timestamp: The timestamp of the element that arrived.
:param window: The window to which the element is being added.
:param ctx: A context object that can be used to register timer callbacks.
"""
pass
@abstractmethod
def on_processing_time(self,
time: int,
window: W,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
"""
Called when a processing-time timer that was set using the trigger context fires.
:param time: The timestamp at which the timer fired.
:param window: The window for which the timer fired.
:param ctx: A context object that can be used to register timer callbacks.
"""
pass
@abstractmethod
def on_event_time(self, time: int, window: W, ctx: 'Trigger.TriggerContext') -> TriggerResult:
"""
Called when an event-time timer that was set using the trigger context fires.
:param time: The timestamp at which the timer fired.
:param window: The window for which the timer fired.
:param ctx: A context object that can be used to register timer callbacks.
"""
pass
def can_merge(self) -> bool:
"""
.. note:: If this returns true you must properly implement :func:`~Trigger.on_merge`
:return: True if this trigger supports merging of trigger state and can therefore be used
with a MergingWindowAssigner.
"""
return False
@abstractmethod
def on_merge(self, window: W, ctx: 'Trigger.OnMergeContext') -> None:
"""
Called when several windows have been merged into one window by the :class:`WindowAssigner`.
:param window: The new window that results from the merge.
:param ctx: A context object that can be used to register timer callbacks and access state.
"""
pass
@abstractmethod
def clear(self, window: W, ctx: 'Trigger.TriggerContext') -> None:
"""
Clears any state that the trigger might still hold for the given window. This is called when
a window is purged. Timers set using :func:`~TriggerContext.register_event_time_timer` and
:func:`~TriggerContext.register_processing_time_timer` should be deleted here as well as
state acquired using :func:`~TriggerContext.get_partitioned_state`.
"""
pass
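# --- Illustrative sketch (not part of the original module) ---------------------
# A minimal, stateless Trigger implementation that fires the window on every
# element and ignores timers. It only shows which methods a custom trigger has to
# provide; production triggers (see EventTimeTrigger below) are usually driven by
# timers and keep their state via the TriggerContext.
class _FireOnEveryElementTrigger(Trigger[Any, Window]):
    def on_element(self, element, timestamp, window, ctx) -> TriggerResult:
        # Evaluate the window for every incoming element; keep its contents.
        return TriggerResult.FIRE
    def on_processing_time(self, time, window, ctx) -> TriggerResult:
        return TriggerResult.CONTINUE
    def on_event_time(self, time, window, ctx) -> TriggerResult:
        return TriggerResult.CONTINUE
    def on_merge(self, window, ctx) -> None:
        pass  # no per-window trigger state to merge
    def clear(self, window, ctx) -> None:
        pass  # nothing to clean up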
class WindowAssigner(ABC, Generic[T, W]):
"""
A :class:`WindowAssigner` assigns zero or more :class:`Window` to an element.
In a window operation, elements are grouped by their key (if available) and by the windows to
    which they were assigned. The set of elements with the same key and window is called a pane. When a
    :class:`Trigger` decides that a certain pane should fire, the WindowFunction is applied to
produce output elements for that pane.
"""
class WindowAssignerContext(ABC):
"""
A context provided to the :class:`WindowAssigner` that allows it to query the current
processing time.
"""
@abstractmethod
def get_current_processing_time(self) -> int:
"""
:return: The current processing time.
"""
pass
@abstractmethod
def get_runtime_context(self) -> RuntimeContext:
"""
:return: The current runtime context.
"""
pass
@abstractmethod
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[W]:
"""
:param element: The element to which windows should be assigned.
:param timestamp: The timestamp of the element.
:param context: The :class:`WindowAssignerContext` in which the assigner operates.
:return: A collection of windows that should be assigned to the element.
"""
pass
@abstractmethod
def get_default_trigger(self, env) -> Trigger[T, W]:
"""
:param env: The StreamExecutionEnvironment used to compile the DataStream job.
:return: The default trigger associated with this :class:`WindowAssigner`.
"""
pass
@abstractmethod
def get_window_serializer(self) -> TypeSerializer[W]:
"""
:return: A TypeSerializer for serializing windows that are assigned by this WindowAssigner.
"""
pass
@abstractmethod
def is_event_time(self) -> bool:
"""
:return: True if elements are assigned to windows based on event time, false otherwise.
"""
pass
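# --- Illustrative sketch (not part of the original module) ---------------------
# A minimal WindowAssigner that puts every element into a fixed-size tumbling
# processing-time window. The 5-second size is an arbitrary choice for the
# example; ProcessingTimeTrigger and TimeWindowSerializer are defined elsewhere
# in this module and are only resolved when the methods run.
class _FixedTumblingProcessingTimeAssigner(WindowAssigner[Any, TimeWindow]):
    SIZE_MS = 5000
    def assign_windows(self, element, timestamp, context) -> Collection[TimeWindow]:
        now = context.get_current_processing_time()
        start = TimeWindow.get_window_start_with_offset(now, 0, self.SIZE_MS)
        return [TimeWindow(start, start + self.SIZE_MS)]
    def get_default_trigger(self, env) -> Trigger[Any, TimeWindow]:
        return ProcessingTimeTrigger.create()
    def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
        return TimeWindowSerializer()
    def is_event_time(self) -> bool:
        return False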
class MergingWindowAssigner(WindowAssigner[T, W]):
"""
A `WindowAssigner` that can merge windows.
"""
class MergeCallback(ABC, Generic[W2]):
"""
Callback to be used in :func:`~MergingWindowAssigner.merge_windows` for specifying which
windows should be merged.
"""
@abstractmethod
def merge(self, to_be_merged: Iterable[W2], merge_result: W2) -> None:
"""
Specifies that the given windows should be merged into the result window.
:param to_be_merged: The list of windows that should be merged into one window.
:param merge_result: The resulting merged window.
"""
pass
@abstractmethod
def merge_windows(self,
windows: Iterable[W],
callback: 'MergingWindowAssigner.MergeCallback[W]') -> None:
"""
Determines which windows (if any) should be merged.
:param windows: The window candidates.
:param callback: A callback that can be invoked to signal which windows should be merged.
"""
pass
class WindowOperationDescriptor(object):
def __init__(self,
assigner: WindowAssigner,
trigger: Trigger,
allowed_lateness: int,
late_data_output_tag: Optional[OutputTag],
window_state_descriptor: StateDescriptor,
window_serializer: TypeSerializer,
internal_window_function: InternalWindowFunction):
self.assigner = assigner
self.trigger = trigger
self.allowed_lateness = allowed_lateness
self.late_data_output_tag = late_data_output_tag
self.window_state_descriptor = window_state_descriptor
self.internal_window_function = internal_window_function
self.window_serializer = window_serializer
def generate_op_name(self):
return type(self.assigner).__name__
def generate_op_desc(self, windowed_stream_type, func_desc):
return "%s(%s, %s, %s)" % (
windowed_stream_type, self.assigner, type(self.trigger).__name__, func_desc)
class SessionWindowTimeGapExtractor(ABC):
"""
A SessionWindowTimeGapExtractor extracts session time gaps for Dynamic Session Window
Assigners.
"""
@abstractmethod
def extract(self, element: Any) -> int:
"""
Extracts the session time gap.
        :param element: The input element.
        :return: The session time gap in milliseconds.
"""
pass
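# --- Illustrative sketch (not part of the original module) ---------------------
# A SessionWindowTimeGapExtractor that derives the session gap from the element
# itself. The assumption that elements are (user_id, gap_ms) pairs is made up for
# the example; any strictly positive number of milliseconds may be returned.
class _GapFromElementExtractor(SessionWindowTimeGapExtractor):
    def extract(self, element: Any) -> int:
        # e.g. element = ('user-42', 30000) -> a 30 second session gap
        return int(element[1])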
class EventTimeTrigger(Trigger[T, TimeWindow]):
"""
A Trigger that fires once the watermark passes the end of the window to which a pane belongs.
"""
def on_element(self,
element: T,
timestamp: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
if window.max_timestamp() <= ctx.get_current_watermark():
return TriggerResult.FIRE
else:
ctx.register_event_time_timer(window.max_timestamp())
# No action is taken on the window.
return TriggerResult.CONTINUE
def on_processing_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
# No action is taken on the window.
return TriggerResult.CONTINUE
def on_event_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
if time == window.max_timestamp():
return TriggerResult.FIRE
else:
# No action is taken on the window.
return TriggerResult.CONTINUE
def can_merge(self) -> bool:
return True
def on_merge(self,
window: TimeWindow,
ctx: 'Trigger.OnMergeContext') -> None:
window_max_timestamp = window.max_timestamp()
if window_max_timestamp > ctx.get_current_watermark():
ctx.register_event_time_timer(window_max_timestamp)
def clear(self,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> None:
ctx.delete_event_time_timer(window.max_timestamp())
@staticmethod
def create() -> 'EventTimeTrigger':
return EventTimeTrigger()
class ContinuousEventTimeTrigger(Trigger[T, TimeWindow]):
"""
    A Trigger that continuously fires based on a given time interval. This fires based
    on watermarks.
"""
def __init__(self, interval: int):
self.interval = interval
self.state_desc = ReducingStateDescriptor("fire-time", Min, Types.LONG())
self.fire_timestamp_state = None
@staticmethod
def of(interval: Time) -> 'ContinuousEventTimeTrigger':
return ContinuousEventTimeTrigger(interval.to_milliseconds())
def on_element(self,
element: T,
timestamp: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
if window.max_timestamp() <= ctx.get_current_watermark():
# if the watermark is already past the window fire immediately
return TriggerResult.FIRE
else:
ctx.register_event_time_timer(window.max_timestamp())
fire_timestamp_state = cast(ReducingState, ctx.get_partitioned_state(self.state_desc))
if fire_timestamp_state.get() is None:
self.register_next_fire_timestamp(timestamp - (timestamp % self.interval), window, ctx,
fire_timestamp_state)
return TriggerResult.CONTINUE
def on_processing_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_event_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
if time == window.max_timestamp():
return TriggerResult.FIRE
fire_timestamp_state = cast(ReducingState, ctx.get_partitioned_state(self.state_desc))
fire_timestamp = fire_timestamp_state.get()
if fire_timestamp is not None and fire_timestamp == time:
fire_timestamp_state.clear()
self.register_next_fire_timestamp(time, window, ctx, fire_timestamp_state)
return TriggerResult.FIRE
return TriggerResult.CONTINUE
def on_merge(self, window: TimeWindow, ctx: 'Trigger.OnMergeContext') -> None:
ctx.merge_partitioned_state(self.state_desc)
next_fire_timestamp = cast(ReducingState, ctx.get_partitioned_state(self.state_desc)).get()
if next_fire_timestamp is not None:
ctx.register_event_time_timer(next_fire_timestamp)
def clear(self, window: TimeWindow, ctx: 'Trigger.TriggerContext') -> None:
fire_timestamp = cast(ReducingState, ctx.get_partitioned_state(self.state_desc))
timestamp = fire_timestamp.get()
if timestamp is not None:
ctx.delete_event_time_timer(timestamp)
fire_timestamp.clear()
def can_merge(self) -> bool:
return True
def register_next_fire_timestamp(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext',
fire_timestamp_state: ReducingState):
next_fire_timestamp = min(time + self.interval, window.max_timestamp())
fire_timestamp_state.add(next_fire_timestamp)
ctx.register_event_time_timer(next_fire_timestamp)
class Min(ReduceFunction):
def reduce(self, value1, value2):
return min(value1, value2)
class ProcessingTimeTrigger(Trigger[T, TimeWindow]):
"""
A Trigger that fires once the current system time passes the end of the window to
which a pane belongs.
"""
def on_element(self,
element: T,
timestamp: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
ctx.register_processing_time_timer(window.max_timestamp())
return TriggerResult.CONTINUE
def on_processing_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.FIRE
def on_event_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def can_merge(self) -> bool:
return True
def on_merge(self,
window: TimeWindow,
ctx: 'Trigger.OnMergeContext') -> None:
window_max_timestamp = window.max_timestamp()
if window_max_timestamp > ctx.get_current_processing_time():
ctx.register_processing_time_timer(window_max_timestamp)
def clear(self,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> None:
ctx.delete_processing_time_timer(window.max_timestamp())
@staticmethod
def create() -> 'ProcessingTimeTrigger':
return ProcessingTimeTrigger()
class ContinuousProcessingTimeTrigger(Trigger[T, TimeWindow]):
"""
A Trigger that continuously fires based on a given time interval as measured by the clock of the
machine on which the job is running.
"""
def __init__(self, interval: int):
self.interval = interval
        self.state_desc = ReducingStateDescriptor("fire-time", Min(), Types.LONG())
self.fire_timestamp_state = None
@staticmethod
def of(interval: Time) -> 'ContinuousProcessingTimeTrigger':
return ContinuousProcessingTimeTrigger(interval.to_milliseconds())
def on_element(self,
element: T,
timestamp: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
fire_timestamp_state = cast(ReducingState, ctx.get_partitioned_state(self.state_desc))
timestamp = ctx.get_current_processing_time()
if fire_timestamp_state.get() is None:
self.register_next_fire_timestamp(timestamp - (timestamp % self.interval), window, ctx,
fire_timestamp_state)
return TriggerResult.CONTINUE
def on_processing_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
fire_timestamp_state = cast(ReducingState, ctx.get_partitioned_state(self.state_desc))
if fire_timestamp_state.get() == time:
fire_timestamp_state.clear()
self.register_next_fire_timestamp(time, window, ctx, fire_timestamp_state)
return TriggerResult.FIRE
return TriggerResult.CONTINUE
def on_event_time(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_merge(self, window: TimeWindow, ctx: 'Trigger.OnMergeContext') -> None:
        # State for the old windows will be lost after this call.
ctx.merge_partitioned_state(self.state_desc)
# Register timer for this new window.
next_fire_timestamp = cast(ReducingState, ctx.get_partitioned_state(self.state_desc)).get()
if next_fire_timestamp is not None:
ctx.register_processing_time_timer(next_fire_timestamp)
def clear(self, window: TimeWindow, ctx: 'Trigger.TriggerContext') -> None:
fire_timestamp_state = cast(ReducingState, ctx.get_partitioned_state(self.state_desc))
timestamp = fire_timestamp_state.get()
if timestamp is not None:
ctx.delete_processing_time_timer(timestamp)
fire_timestamp_state.clear()
def can_merge(self) -> bool:
return True
def register_next_fire_timestamp(self,
time: int,
window: TimeWindow,
ctx: 'Trigger.TriggerContext',
fire_timestamp_state: ReducingState):
next_fire_timestamp = min(time + self.interval, window.max_timestamp())
fire_timestamp_state.add(next_fire_timestamp)
ctx.register_processing_time_timer(next_fire_timestamp)
class PurgingTrigger(Trigger[T, Window]):
"""
A trigger that can turn any Trigger into a purging Trigger.
When the nested trigger fires, this will return a FIRE_AND_PURGE TriggerResult.
"""
def __init__(self, nested_trigger: Trigger[T, Window]):
self.nested_trigger = nested_trigger
@staticmethod
def of(nested_trigger: Trigger[T, Window]) -> 'PurgingTrigger':
return PurgingTrigger(nested_trigger)
def on_element(self,
element: T,
timestamp: int,
window: Window,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
trigger_result = self.nested_trigger.on_element(element, timestamp, window, ctx)
if trigger_result.is_fire() is True:
return TriggerResult.FIRE_AND_PURGE
else:
return trigger_result
def on_event_time(self,
time: int,
window: Window,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
trigger_result = self.nested_trigger.on_event_time(time, window, ctx)
if trigger_result.is_fire() is True:
return TriggerResult.FIRE_AND_PURGE
else:
return trigger_result
def on_processing_time(self,
time: int,
window: Window,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
trigger_result = self.nested_trigger.on_processing_time(time, window, ctx)
if trigger_result.is_fire() is True:
return TriggerResult.FIRE_AND_PURGE
else:
return trigger_result
def clear(self,
window: Window,
ctx: 'Trigger.TriggerContext') -> None:
self.nested_trigger.clear(window, ctx)
def can_merge(self) -> bool:
return self.nested_trigger.can_merge()
def on_merge(self,
window: Window,
ctx: 'Trigger.OnMergeContext') -> None:
self.nested_trigger.on_merge(window, ctx)
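# A minimal usage sketch (assumes an upstream keyed ``data_stream``): wrapping a trigger
# with PurgingTrigger makes every firing evaluate the window *and* discard its contents,
# e.g. a processing-time window whose state is purged on each firing.
#
#     data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \
#         .window(TumblingProcessingTimeWindows.of(Time.seconds(30))) \
#         .trigger(PurgingTrigger.of(ProcessingTimeTrigger.create()))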
class CountTrigger(Trigger[T, CountWindow]):
"""
A Trigger that fires once the count of elements in a pane reaches the given count.
"""
def __init__(self, window_size: int):
self._window_size = window_size
self._count_state_descriptor = ReducingStateDescriptor(
"count", lambda a, b: a + b, Types.LONG())
@staticmethod
def of(window_size: int) -> 'CountTrigger':
return CountTrigger(window_size)
def on_element(self,
element: T,
timestamp: int,
window: CountWindow,
ctx: Trigger.TriggerContext) -> TriggerResult:
count_state = cast(ReducingState, ctx.get_partitioned_state(self._count_state_descriptor))
count_state.add(1)
if count_state.get() >= self._window_size:
# On FIRE, the window is evaluated and results are emitted. The window is not purged
# though, all elements are retained.
count_state.clear()
return TriggerResult.FIRE
else:
# No action is taken on the window.
return TriggerResult.CONTINUE
def on_processing_time(self,
time: int,
window: CountWindow,
ctx: Trigger.TriggerContext) -> TriggerResult:
# No action is taken on the window.
return TriggerResult.CONTINUE
def on_event_time(self,
time: int,
window: CountWindow,
ctx: Trigger.TriggerContext) -> TriggerResult:
# No action is taken on the window.
return TriggerResult.CONTINUE
def can_merge(self) -> bool:
return True
def on_merge(self, window: CountWindow, ctx: Trigger.OnMergeContext) -> None:
ctx.merge_partitioned_state(self._count_state_descriptor)
def clear(self, window: CountWindow, ctx: Trigger.TriggerContext) -> None:
count_state = ctx.get_partitioned_state(self._count_state_descriptor)
count_state.clear()
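# A minimal usage sketch (assumes an upstream keyed ``data_stream``): emit an early
# result every 100 elements instead of waiting for the default event-time trigger of the
# window assigner.
#
#     data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \
#         .window(TumblingEventTimeWindows.of(Time.minutes(5))) \
#         .trigger(CountTrigger.of(100))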
class NeverTrigger(Trigger[T, GlobalWindow]):
"""
    A trigger that never fires, used as the default Trigger for GlobalWindows.
"""
def on_element(self,
element: T,
timestamp: int,
window: GlobalWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_processing_time(self,
time: int,
window: GlobalWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_event_time(self,
time: int,
window: GlobalWindow,
ctx: 'Trigger.TriggerContext') -> TriggerResult:
return TriggerResult.CONTINUE
def on_merge(self,
window: GlobalWindow,
ctx: 'Trigger.OnMergeContext') -> None:
pass
def clear(self,
window: GlobalWindow,
ctx: 'Trigger.TriggerContext') -> None:
pass
class CountTumblingWindowAssigner(WindowAssigner[T, CountWindow]):
"""
    A WindowAssigner that windows elements into fixed-size windows based on the number of
    elements they contain. Windows cannot overlap.
"""
def __init__(self, window_size: int):
"""
:param window_size: The size of the windows in number of elements.
"""
self._window_size = window_size
self._count_descriptor = ValueStateDescriptor('tumble-count-assigner', Types.LONG())
@staticmethod
def of(window_size: int) -> 'CountTumblingWindowAssigner':
return CountTumblingWindowAssigner(window_size)
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[CountWindow]:
count_state = context.get_runtime_context().get_state(self._count_descriptor)
count_value = count_state.value()
if count_value is None:
current_count = 0
else:
current_count = count_value
count_state.update(current_count + 1)
return [CountWindow(current_count // self._window_size)]
def get_default_trigger(self, env) -> Trigger[T, CountWindow]:
return CountTrigger(self._window_size)
def get_window_serializer(self) -> TypeSerializer[CountWindow]:
return CountWindowSerializer()
def is_event_time(self) -> bool:
return False
def __repr__(self) -> str:
return "CountTumblingWindowAssigner(%s)" % self._window_size
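# A minimal usage sketch (assumes an upstream keyed ``data_stream`` and a user-defined
# ``MyWindowFunction``): group every 10 elements per key into one non-overlapping window.
#
#     data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \
#         .window(CountTumblingWindowAssigner.of(10)) \
#         .apply(MyWindowFunction())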
class CountSlidingWindowAssigner(WindowAssigner[T, CountWindow]):
"""
    A WindowAssigner that windows elements into sliding windows based on the number of
    elements they contain. Windows can possibly overlap.
"""
def __init__(self, window_size: int, window_slide: int):
"""
:param window_size: The size of the windows in number of elements.
:param window_slide: The slide interval in number of elements.
"""
self._window_size = window_size
self._window_slide = window_slide
self._count_descriptor = ValueStateDescriptor('slide-count-assigner', Types.LONG())
@staticmethod
def of(window_size: int, window_slide: int) -> 'CountSlidingWindowAssigner':
return CountSlidingWindowAssigner(window_size, window_slide)
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[CountWindow]:
count_state = context.get_runtime_context().get_state(self._count_descriptor)
count_value = count_state.value()
if count_value is None:
current_count = 0
else:
current_count = count_value
count_state.update(current_count + 1)
last_id = current_count // self._window_slide
last_start = last_id * self._window_slide
last_end = last_start + self._window_size - 1
windows = []
while last_id >= 0 and last_start <= current_count <= last_end:
if last_start <= current_count <= last_end:
windows.append(CountWindow(last_id))
last_id -= 1
last_start -= self._window_slide
last_end -= self._window_slide
return windows
def get_default_trigger(self, env) -> Trigger[T, CountWindow]:
return CountTrigger(self._window_size)
def get_window_serializer(self) -> TypeSerializer[CountWindow]:
return CountWindowSerializer()
def is_event_time(self) -> bool:
return False
def __repr__(self):
return "CountSlidingWindowAssigner(%s, %s)" % (self._window_size, self._window_slide)
class TumblingProcessingTimeWindows(WindowAssigner[T, TimeWindow]):
"""
A WindowAssigner that windows elements into windows based on the current system time of
the machine the operation is running on. Windows cannot overlap.
For example, in order to window into windows of 1 minute, every 10 seconds:
::
>>> data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \\
... .window(TumblingProcessingTimeWindows.of(Time.minutes(1), Time.seconds(10)))
"""
def __init__(self, size: int, offset: int):
if abs(offset) >= size:
raise Exception("TumblingProcessingTimeWindows parameters must satisfy "
"abs(offset) < size")
self._size = size
self._offset = offset
@staticmethod
def of(size: Time, offset: Time = None) -> 'TumblingProcessingTimeWindows':
"""
        Creates a new :class:`TumblingProcessingTimeWindows` :class:`WindowAssigner` that assigns
        elements to time windows based on the current processing time and the given offset.
        For example, if you want to window a stream by the hour, but the windows should begin at
        the 15th minute of each hour, you can use of(Time.hours(1), Time.minutes(15)); the
        resulting time windows then start at 0:15:00, 1:15:00, 2:15:00, etc.
        Similarly, if you live in a timezone other than UTC±00:00, such as China (UTC+08:00), and
        you want daily windows that begin at 00:00:00 local time, you can use
        of(Time.days(1), Time.hours(-8)). The offset is Time.hours(-8) because local time in
        UTC+08:00 is 8 hours ahead of UTC.
        :param size: The size of the generated windows.
        :param offset: The offset by which the window start is shifted.
        :return: The time policy.
"""
if offset is None:
return TumblingProcessingTimeWindows(size.to_milliseconds(), 0)
else:
return TumblingProcessingTimeWindows(size.to_milliseconds(), offset.to_milliseconds())
def assign_windows(self,
element: T,
timestamp: int,
context: WindowAssigner.WindowAssignerContext) -> Collection[TimeWindow]:
current_processing_time = context.get_current_processing_time()
start = TimeWindow.get_window_start_with_offset(current_processing_time, self._offset,
self._size)
return [TimeWindow(start, start + self._size)]
def get_default_trigger(self, env) -> Trigger[T, TimeWindow]:
return ProcessingTimeTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return False
def __repr__(self):
return "TumblingProcessingTimeWindows(%s, %s)" % (self._size, self._offset)
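# Worked example of the window-start arithmetic used in assign_windows above. The start
# offset computation is effectively start = ts - (ts - offset + size) % size; with
# size=60_000 ms, offset=15_000 ms and a current processing time of 125_000 ms, the
# element is assigned to the window [75_000, 135_000). (Illustrative sketch of the
# behaviour of TimeWindow.get_window_start_with_offset, not a separate API.)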
class TumblingEventTimeWindows(WindowAssigner[T, TimeWindow]):
"""
A WindowAssigner that windows elements into windows based on the timestamp of the
elements. Windows cannot overlap.
For example, in order to window into windows of 1 minute:
::
>>> data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \\
... .window(TumblingEventTimeWindows.of(Time.minutes(1)))
"""
def __init__(self, size: int, offset: int):
if abs(offset) >= size:
raise Exception("TumblingEventTimeWindows parameters must satisfy abs(offset) < size")
self._size = size
self._offset = offset
@staticmethod
def of(size: Time, offset: Time = None) -> 'TumblingEventTimeWindows':
"""
        Creates a new TumblingEventTimeWindows WindowAssigner that assigns elements
        to time windows based on the element timestamp and the given offset.
        :param size: The size of the generated windows.
        :param offset: The offset by which the window start is shifted.
"""
if offset is None:
return TumblingEventTimeWindows(size.to_milliseconds(), 0)
else:
return TumblingEventTimeWindows(size.to_milliseconds(), offset.to_milliseconds())
def assign_windows(self,
element: T,
timestamp: int,
context: WindowAssigner.WindowAssignerContext) -> Collection[TimeWindow]:
if timestamp > MIN_LONG_VALUE:
start = TimeWindow.get_window_start_with_offset(timestamp, self._offset, self._size)
return [TimeWindow(start, start + self._size)]
else:
raise Exception("Record has Java Long.MIN_VALUE timestamp (= no timestamp marker). "
+ "Is the time characteristic set to 'ProcessingTime', "
+ "or did you forget to call "
+ "'data_stream.assign_timestamps_and_watermarks(...)'?")
def get_default_trigger(self, env) -> Trigger[T, TimeWindow]:
return EventTimeTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return True
def __repr__(self):
return "TumblingEventTimeWindows(%s, %s)" % (self._size, self._offset)
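# A minimal usage sketch (assumes a ``data_stream`` and a user-defined
# ``MyTimestampAssigner``): event-time windows require timestamps and watermarks to be
# assigned upstream, otherwise assign_windows raises the exception shown above.
#
#     ds = data_stream.assign_timestamps_and_watermarks(
#         WatermarkStrategy.for_monotonous_timestamps()
#                          .with_timestamp_assigner(MyTimestampAssigner()))
#     ds.key_by(lambda x: x[0], key_type=Types.STRING()) \
#         .window(TumblingEventTimeWindows.of(Time.minutes(1)))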
class SlidingProcessingTimeWindows(WindowAssigner[T, TimeWindow]):
"""
A WindowAssigner that windows elements into sliding windows based on the current system
time of the machine the operation is running on. Windows can possibly overlap.
For example, in order to window into windows of 1 minute, every 10 seconds:
::
>>> data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \\
... .window(SlidingProcessingTimeWindows.of(Time.minutes(1), Time.seconds(10)))
"""
def __init__(self, size: int, slide: int, offset: int):
if abs(offset) >= slide or size <= 0:
raise Exception("SlidingProcessingTimeWindows parameters must satisfy "
+ "abs(offset) < slide and size > 0")
self._size = size
self._slide = slide
self._offset = offset
self._pane_size = math.gcd(size, slide)
@staticmethod
def of(size: Time, slide: Time, offset: Time = None) -> 'SlidingProcessingTimeWindows':
"""
Creates a new :class:`SlidingProcessingTimeWindows` :class:`WindowAssigner` that assigns
        elements to time windows based on the current processing time and the given offset.
        For example, if you want to window a stream by the hour, but the windows should begin at
        the 15th minute of each hour, you can use of(Time.hours(1), Time.minutes(15)); the
        resulting time windows then start at 0:15:00, 1:15:00, 2:15:00, etc.
        Similarly, if you live in a timezone other than UTC±00:00, such as China (UTC+08:00), and
        you want daily windows that begin at 00:00:00 local time, you can use
        of(Time.days(1), Time.hours(-8)). The offset is Time.hours(-8) because local time in
        UTC+08:00 is 8 hours ahead of UTC.
        :param size: The size of the generated windows.
        :param slide: The slide interval of the generated windows.
        :param offset: The offset by which the window start is shifted.
        :return: The time policy.
"""
if offset is None:
return SlidingProcessingTimeWindows(size.to_milliseconds(), slide.to_milliseconds(), 0)
else:
return SlidingProcessingTimeWindows(size.to_milliseconds(), slide.to_milliseconds(),
offset.to_milliseconds())
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[TimeWindow]:
current_processing_time = context.get_current_processing_time()
last_start = TimeWindow.get_window_start_with_offset(
current_processing_time, self._offset, self._slide)
windows = [TimeWindow(start, start + self._size)
for start in range(last_start,
current_processing_time - self._size, -self._slide)]
return windows
def get_default_trigger(self, env) -> Trigger[T, TimeWindow]:
return ProcessingTimeTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return False
def __repr__(self) -> str:
return "SlidingProcessingTimeWindows(%s, %s, %s)" % (self._size, self._slide, self._offset)
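# Illustrative note on overlap: with size=60s and slide=10s, each element is assigned to
# size / slide = 6 concurrent windows, which multiplies window state accordingly. A
# minimal usage sketch (assumes an upstream keyed ``data_stream``):
#
#     data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \
#         .window(SlidingProcessingTimeWindows.of(Time.minutes(1), Time.seconds(10)))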
class SlidingEventTimeWindows(WindowAssigner[T, TimeWindow]):
"""
A WindowAssigner that windows elements into sliding windows based on the timestamp of the
elements. Windows can possibly overlap.
For example, in order to window into windows of 1 minute, every 10 seconds:
::
>>> data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \\
... .window(SlidingEventTimeWindows.of(Time.minutes(1), Time.seconds(10)))
"""
def __init__(self, size: int, slide: int, offset: int):
if abs(offset) >= slide or size <= 0:
raise Exception("SlidingEventTimeWindows parameters must satisfy "
+ "abs(offset) < slide and size > 0")
self._size = size
self._slide = slide
self._offset = offset
self._pane_size = math.gcd(size, slide)
@staticmethod
def of(size: Time, slide: Time, offset: Time = None) -> 'SlidingEventTimeWindows':
"""
Creates a new :class:`SlidingEventTimeWindows` :class:`WindowAssigner` that assigns elements
to time windows based on the element timestamp and offset.
        For example, if you want to window a stream by the hour, but the windows should begin at
        the 15th minute of each hour, you can use of(Time.hours(1), Time.minutes(15)); the
        resulting time windows then start at 0:15:00, 1:15:00, 2:15:00, etc.
        Similarly, if you live in a timezone other than UTC±00:00, such as China (UTC+08:00), and
        you want daily windows that begin at 00:00:00 local time, you can use
        of(Time.days(1), Time.hours(-8)). The offset is Time.hours(-8) because local time in
        UTC+08:00 is 8 hours ahead of UTC.
        :param size: The size of the generated windows.
        :param slide: The slide interval of the generated windows.
        :param offset: The offset by which the window start is shifted.
        :return: The time policy.
"""
if offset is None:
return SlidingEventTimeWindows(size.to_milliseconds(), slide.to_milliseconds(), 0)
else:
return SlidingEventTimeWindows(size.to_milliseconds(), slide.to_milliseconds(),
offset.to_milliseconds())
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[TimeWindow]:
if timestamp > MIN_LONG_VALUE:
last_start = TimeWindow.get_window_start_with_offset(timestamp,
self._offset, self._slide)
windows = [TimeWindow(start, start + self._size)
for start in range(last_start, timestamp - self._size, -self._slide)]
return windows
else:
raise Exception("Record has Java Long.MIN_VALUE timestamp (= no timestamp marker). "
+ "Is the time characteristic set to 'ProcessingTime', "
"or did you forget to call "
+ "'data_stream.assign_timestamps_and_watermarks(...)'?")
def get_default_trigger(self, env) -> Trigger[T, TimeWindow]:
return EventTimeTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return True
def __repr__(self) -> str:
return "SlidingEventTimeWindows(%s, %s, %s)" % (self._size, self._slide, self._offset)
class ProcessingTimeSessionWindows(MergingWindowAssigner[T, TimeWindow]):
"""
A WindowAssigner that windows elements into sessions based on the current processing
time. Windows cannot overlap.
    For example, in order to window into sessions with a processing-time gap of 1 minute:
::
>>> data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \\
... .window(ProcessingTimeSessionWindows.with_gap(Time.minutes(1)))
"""
def __init__(self, session_gap: int):
if session_gap <= 0:
raise Exception("ProcessingTimeSessionWindows parameters must satisfy 0 < size")
self._session_gap = session_gap
@staticmethod
def with_gap(size: Time) -> 'ProcessingTimeSessionWindows':
"""
Creates a new SessionWindows WindowAssigner that assigns elements to sessions based on
the element timestamp.
:param size: The session timeout, i.e. the time gap between sessions
:return: The policy.
"""
return ProcessingTimeSessionWindows(size.to_milliseconds())
@staticmethod
def with_dynamic_gap(
extractor: SessionWindowTimeGapExtractor) -> 'DynamicProcessingTimeSessionWindows':
"""
Creates a new SessionWindows WindowAssigner that assigns elements to sessions based on the
element timestamp.
:param extractor: The extractor to use to extract the time gap from the input elements.
:return: The policy.
"""
return DynamicProcessingTimeSessionWindows(extractor)
def merge_windows(self,
windows: Iterable[TimeWindow],
callback: 'MergingWindowAssigner.MergeCallback[TimeWindow]') -> None:
TimeWindow.merge_windows(windows, callback)
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[TimeWindow]:
timestamp = context.get_current_processing_time()
return [TimeWindow(timestamp, timestamp + self._session_gap)]
def get_default_trigger(self, env) -> Trigger[T, TimeWindow]:
return ProcessingTimeTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return False
def __repr__(self):
return "ProcessingTimeSessionWindows(%s)" % self._session_gap
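# A minimal usage sketch (assumes an upstream keyed ``data_stream`` of (key, count)
# tuples): sessions close after 30 seconds of processing-time inactivity per key, and
# overlapping session windows are merged by the assigner.
#
#     data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \
#         .window(ProcessingTimeSessionWindows.with_gap(Time.seconds(30))) \
#         .reduce(lambda a, b: (a[0], a[1] + b[1]))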
class EventTimeSessionWindows(MergingWindowAssigner[T, TimeWindow]):
"""
A :class:`WindowAssigner` that windows elements into sessions based on the timestamp of the
elements. Windows cannot overlap.
    For example, in order to window into sessions with an event-time gap of 1 minute:
::
>>> data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \\
... .window(EventTimeSessionWindows.with_gap(Time.minutes(1)))
"""
def __init__(self, session_gap: int):
if session_gap <= 0:
raise Exception("EventTimeSessionWindows parameters must satisfy 0 < size")
self._session_gap = session_gap
@staticmethod
def with_gap(size: Time) -> 'EventTimeSessionWindows':
"""
Creates a new SessionWindows WindowAssigner that assigns elements to sessions
based on the element timestamp.
:param size: The session timeout, i.e. the time gap between sessions.
:return: The policy.
"""
return EventTimeSessionWindows(size.to_milliseconds())
@staticmethod
def with_dynamic_gap(
extractor: SessionWindowTimeGapExtractor) -> 'DynamicEventTimeSessionWindows':
"""
Creates a new SessionWindows WindowAssigner that assigns elements to sessions based on
the element timestamp.
:param extractor: The extractor to use to extract the time gap from the input elements.
:return: The policy.
"""
return DynamicEventTimeSessionWindows(extractor)
def merge_windows(self,
windows: Iterable[TimeWindow],
callback: 'MergingWindowAssigner.MergeCallback[TimeWindow]') -> None:
TimeWindow.merge_windows(windows, callback)
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[TimeWindow]:
return [TimeWindow(timestamp, timestamp + self._session_gap)]
def get_default_trigger(self, env) -> Trigger[T, TimeWindow]:
return EventTimeTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return True
def __repr__(self):
return "EventTimeSessionWindows(%s)" % self._session_gap
class DynamicProcessingTimeSessionWindows(MergingWindowAssigner[T, TimeWindow]):
"""
A WindowAssigner that windows elements into sessions based on the current processing
time. Windows cannot overlap.
For example, in order to window into windows with a dynamic time gap:
::
>>> data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \\
... .window(DynamicProcessingTimeSessionWindows.with_dynamic_gap(extractor))
"""
def __init__(self,
session_window_time_gap_extractor: SessionWindowTimeGapExtractor):
self._session_gap = 0
self._session_window_time_gap_extractor = session_window_time_gap_extractor
@staticmethod
def with_dynamic_gap(
extractor: SessionWindowTimeGapExtractor) -> 'DynamicProcessingTimeSessionWindows':
"""
Creates a new SessionWindows WindowAssigner that assigns elements to sessions based
on the element timestamp.
:param extractor: The extractor to use to extract the time gap from the input elements.
:return: The policy.
"""
return DynamicProcessingTimeSessionWindows(extractor)
def merge_windows(self,
windows: Iterable[TimeWindow],
callback: 'MergingWindowAssigner.MergeCallback[TimeWindow]') -> None:
TimeWindow.merge_windows(windows, callback)
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[TimeWindow]:
timestamp = context.get_current_processing_time()
self._session_gap = self._session_window_time_gap_extractor.extract(element)
if self._session_gap <= 0:
raise Exception("Dynamic session time gap must satisfy 0 < gap")
return [TimeWindow(timestamp, timestamp + self._session_gap)]
def get_default_trigger(self, env) -> Trigger[T, TimeWindow]:
return ProcessingTimeTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return False
def __repr__(self):
return "DynamicProcessingTimeSessionWindows(%s)" % self._session_gap
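# A minimal sketch of a gap extractor (assumes records shaped like (key, gap_ms, ...)
# where the second field carries the per-record session gap in milliseconds; the class
# name is illustrative):
#
#     class GapFromRecord(SessionWindowTimeGapExtractor):
#         def extract(self, element) -> int:
#             return element[1]
#
#     data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \
#         .window(DynamicProcessingTimeSessionWindows.with_dynamic_gap(GapFromRecord()))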
class DynamicEventTimeSessionWindows(MergingWindowAssigner[T, TimeWindow]):
"""
A :class:`WindowAssigner` that windows elements into sessions based on the timestamp of the
elements. Windows cannot overlap.
For example, in order to window into windows with a dynamic time gap:
::
>>> data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \\
... .window(DynamicEventTimeSessionWindows.with_dynamic_gap(extractor))
"""
def __init__(self,
session_window_time_gap_extractor: SessionWindowTimeGapExtractor):
self._session_gap = 0
self._session_window_time_gap_extractor = session_window_time_gap_extractor
@staticmethod
def with_dynamic_gap(
extractor: SessionWindowTimeGapExtractor) -> 'DynamicEventTimeSessionWindows':
"""
Creates a new SessionWindows WindowAssigner that assigns elements to sessions
based on the element timestamp.
:param extractor: The extractor to use to extract the time gap from the input elements.
:return: The policy.
"""
return DynamicEventTimeSessionWindows(extractor)
def merge_windows(self,
windows: Iterable[TimeWindow],
callback: 'MergingWindowAssigner.MergeCallback[TimeWindow]') -> None:
TimeWindow.merge_windows(windows, callback)
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[TimeWindow]:
self._session_gap = self._session_window_time_gap_extractor.extract(element)
if self._session_gap <= 0:
raise Exception("Dynamic session time gap must satisfy 0 < gap")
return [TimeWindow(timestamp, timestamp + self._session_gap)]
def get_default_trigger(self, env) -> Trigger[T, TimeWindow]:
return EventTimeTrigger()
def get_window_serializer(self) -> TypeSerializer[TimeWindow]:
return TimeWindowSerializer()
def is_event_time(self) -> bool:
return True
def __repr__(self):
return "DynamicEventTimeSessionWindows(%s)" % self._session_gap
class GlobalWindows(WindowAssigner[T, GlobalWindow]):
"""
A WindowAssigner that assigns all elements to the same GlobalWindow.
"""
def __init__(self) -> None:
super().__init__()
def assign_windows(self,
element: T,
timestamp: int,
context: 'WindowAssigner.WindowAssignerContext') -> Collection[GlobalWindow]:
return [GlobalWindow.get()]
@staticmethod
def create() -> 'GlobalWindows':
"""
Creates a new GlobalWindows WindowAssigner that assigns all elements to the
same GlobalWindow.
"""
return GlobalWindows()
def get_default_trigger(self, env) -> Trigger[T, GlobalWindow]:
return NeverTrigger()
def get_window_serializer(self) -> TypeSerializer[GlobalWindow]:
return GlobalWindowSerializer()
def is_event_time(self) -> bool:
return False
def __repr__(self) -> str:
return "GlobalWindows()"
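# A minimal usage sketch (assumes an upstream keyed ``data_stream``): because the default
# trigger for GlobalWindows never fires, a custom trigger such as a purging count trigger
# is usually attached.
#
#     data_stream.key_by(lambda x: x[0], key_type=Types.STRING()) \
#         .window(GlobalWindows.create()) \
#         .trigger(PurgingTrigger.of(CountTrigger.of(1000)))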
| 65,024 | 36.761324 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/execution_mode.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from pyflink.java_gateway import get_gateway
__all__ = ['RuntimeExecutionMode']
class RuntimeExecutionMode(Enum):
"""
Runtime execution mode of DataStream programs. Among other things, this controls task
scheduling, network shuffle behavior, and time semantics. Some operations will also change
their record emission behaviour based on the configured execution mode.
:data:`STREAMING`:
The Pipeline will be executed with Streaming Semantics. All tasks will be deployed before
execution starts, checkpoints will be enabled, and both processing and event time will be
fully supported.
:data:`BATCH`:
The Pipeline will be executed with Batch Semantics. Tasks will be scheduled gradually based
    on the scheduling region they belong to, shuffles between regions will be blocking, watermarks
are assumed to be "perfect" i.e. no late data, and processing time is assumed to not advance
during execution.
:data:`AUTOMATIC`:
Flink will set the execution mode to BATCH if all sources are bounded, or STREAMING if there
is at least one source which is unbounded.
"""
STREAMING = 0
BATCH = 1
AUTOMATIC = 2
@staticmethod
def _from_j_execution_mode(j_execution_mode) -> 'RuntimeExecutionMode':
return RuntimeExecutionMode[j_execution_mode.name()]
def _to_j_execution_mode(self):
gateway = get_gateway()
JRuntimeExecutionMode = \
gateway.jvm.org.apache.flink.api.common.RuntimeExecutionMode
return getattr(JRuntimeExecutionMode, self.name)
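# A minimal usage sketch (assumes ``StreamExecutionEnvironment`` is imported from
# pyflink.datastream): selecting the runtime execution mode for a job.
#
#     env = StreamExecutionEnvironment.get_execution_environment()
#     env.set_runtime_mode(RuntimeExecutionMode.BATCH)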
| 2,550 | 39.492063 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/output_tag.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Optional, Union
from pyflink.common.typeinfo import TypeInformation, Types, RowTypeInfo
from pyflink.java_gateway import get_gateway
class OutputTag(object):
"""
An :class:`OutputTag` is a typed and named tag to use for tagging side outputs of an operator.
Example:
::
# Explicitly specify output type
>>> info = OutputTag("late-data", Types.TUPLE([Types.STRING(), Types.LONG()]))
# Implicitly wrap list to Types.ROW
>>> info_row = OutputTag("row", [Types.STRING(), Types.LONG()])
# Implicitly use pickle serialization
>>> info_side = OutputTag("side")
# ERROR: tag id cannot be empty string (extra requirement for Python API)
>>> info_error = OutputTag("")
"""
def __init__(self, tag_id: str, type_info: Optional[Union[TypeInformation, list]] = None):
if not tag_id:
raise ValueError("OutputTag tag_id cannot be None or empty string")
self.tag_id = tag_id
if type_info is None:
self.type_info = Types.PICKLED_BYTE_ARRAY()
elif isinstance(type_info, list):
self.type_info = RowTypeInfo(type_info)
elif not isinstance(type_info, TypeInformation):
raise TypeError("OutputTag type_info must be None, list or TypeInformation")
else:
self.type_info = type_info
self._j_output_tag = None
def __getstate__(self):
        # prevent the Java object from being pickled
self.type_info._j_typeinfo = None
return self.tag_id, self.type_info
def __setstate__(self, state):
tag_id, type_info = state
self.tag_id = tag_id
self.type_info = type_info
self._j_output_tag = None
def get_java_output_tag(self):
gateway = get_gateway()
if self._j_output_tag is None:
self._j_output_tag = gateway.jvm.org.apache.flink.util.OutputTag(
self.tag_id, self.type_info.get_java_type_info()
)
return self._j_output_tag
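# A minimal sketch of the side-output pattern (assumes an upstream ``ds`` DataStream;
# ``is_late`` is a hypothetical predicate and the output type is illustrative): records
# yielded together with the tag are retrieved from the main stream via get_side_output.
#
#     late_tag = OutputTag("late-data", Types.TUPLE([Types.STRING(), Types.LONG()]))
#
#     class Route(ProcessFunction):
#         def process_element(self, value, ctx):
#             if is_late(value):
#                 yield late_tag, value   # routed to the side output
#             else:
#                 yield value             # routed to the main output
#
#     main = ds.process(Route(), output_type=Types.TUPLE([Types.STRING(), Types.LONG()]))
#     late = main.get_side_output(late_tag)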
| 2,996 | 38.434211 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/checkpoint_config.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from typing import Optional
from pyflink.common import Duration
from pyflink.datastream.checkpoint_storage import CheckpointStorage, _from_j_checkpoint_storage
from pyflink.datastream.checkpointing_mode import CheckpointingMode
from pyflink.java_gateway import get_gateway
__all__ = ['CheckpointConfig', 'ExternalizedCheckpointCleanup']
class CheckpointConfig(object):
"""
Configuration that captures all checkpointing related settings.
:data:`DEFAULT_MODE`:
The default checkpoint mode: exactly once.
:data:`DEFAULT_TIMEOUT`:
The default timeout of a checkpoint attempt: 10 minutes.
:data:`DEFAULT_MIN_PAUSE_BETWEEN_CHECKPOINTS`:
The default minimum pause to be made between checkpoints: none.
:data:`DEFAULT_MAX_CONCURRENT_CHECKPOINTS`:
The default limit of concurrently happening checkpoints: one.
"""
DEFAULT_MODE = CheckpointingMode.EXACTLY_ONCE
DEFAULT_TIMEOUT = 10 * 60 * 1000
DEFAULT_MIN_PAUSE_BETWEEN_CHECKPOINTS = 0
DEFAULT_MAX_CONCURRENT_CHECKPOINTS = 1
def __init__(self, j_checkpoint_config):
self._j_checkpoint_config = j_checkpoint_config
def is_checkpointing_enabled(self) -> bool:
"""
Checks whether checkpointing is enabled.
        :return: True if checkpointing is enabled, false otherwise.
"""
return self._j_checkpoint_config.isCheckpointingEnabled()
def get_checkpointing_mode(self) -> CheckpointingMode:
"""
Gets the checkpointing mode (exactly-once vs. at-least-once).
.. seealso:: :func:`set_checkpointing_mode`
:return: The :class:`CheckpointingMode`.
"""
return CheckpointingMode._from_j_checkpointing_mode(
self._j_checkpoint_config.getCheckpointingMode())
def set_checkpointing_mode(self, checkpointing_mode: CheckpointingMode) -> 'CheckpointConfig':
"""
Sets the checkpointing mode (:data:`CheckpointingMode.EXACTLY_ONCE` vs.
:data:`CheckpointingMode.AT_LEAST_ONCE`).
Example:
::
>>> config.set_checkpointing_mode(CheckpointingMode.AT_LEAST_ONCE)
:param checkpointing_mode: The :class:`CheckpointingMode`.
"""
self._j_checkpoint_config.setCheckpointingMode(
CheckpointingMode._to_j_checkpointing_mode(checkpointing_mode))
return self
def get_checkpoint_interval(self) -> int:
"""
Gets the interval in which checkpoints are periodically scheduled.
This setting defines the base interval. Checkpoint triggering may be delayed by the settings
:func:`get_max_concurrent_checkpoints` and :func:`get_min_pause_between_checkpoints`.
:return: The checkpoint interval, in milliseconds.
"""
return self._j_checkpoint_config.getCheckpointInterval()
def set_checkpoint_interval(self, checkpoint_interval: int) -> 'CheckpointConfig':
"""
Sets the interval in which checkpoints are periodically scheduled.
This setting defines the base interval. Checkpoint triggering may be delayed by the settings
:func:`set_max_concurrent_checkpoints` and :func:`set_min_pause_between_checkpoints`.
:param checkpoint_interval: The checkpoint interval, in milliseconds.
"""
self._j_checkpoint_config.setCheckpointInterval(checkpoint_interval)
return self
def get_checkpoint_timeout(self) -> int:
"""
Gets the maximum time that a checkpoint may take before being discarded.
:return: The checkpoint timeout, in milliseconds.
"""
return self._j_checkpoint_config.getCheckpointTimeout()
def set_checkpoint_timeout(self, checkpoint_timeout: int) -> 'CheckpointConfig':
"""
Sets the maximum time that a checkpoint may take before being discarded.
:param checkpoint_timeout: The checkpoint timeout, in milliseconds.
"""
self._j_checkpoint_config.setCheckpointTimeout(checkpoint_timeout)
return self
def get_min_pause_between_checkpoints(self) -> int:
"""
Gets the minimal pause between checkpointing attempts. This setting defines how soon the
checkpoint coordinator may trigger another checkpoint after it becomes possible to trigger
another checkpoint with respect to the maximum number of concurrent checkpoints
(see :func:`get_max_concurrent_checkpoints`).
:return: The minimal pause before the next checkpoint is triggered.
"""
return self._j_checkpoint_config.getMinPauseBetweenCheckpoints()
def set_min_pause_between_checkpoints(self,
min_pause_between_checkpoints: int) -> 'CheckpointConfig':
"""
Sets the minimal pause between checkpointing attempts. This setting defines how soon the
checkpoint coordinator may trigger another checkpoint after it becomes possible to trigger
another checkpoint with respect to the maximum number of concurrent checkpoints
(see :func:`set_max_concurrent_checkpoints`).
        If the maximum number of concurrent checkpoints is set to one, this setting effectively
        ensures that a minimum amount of time passes during which no checkpoint is in progress
        at all.
:param min_pause_between_checkpoints: The minimal pause before the next checkpoint is
triggered.
"""
self._j_checkpoint_config.setMinPauseBetweenCheckpoints(min_pause_between_checkpoints)
return self
def get_max_concurrent_checkpoints(self) -> int:
"""
Gets the maximum number of checkpoint attempts that may be in progress at the same time.
If this value is *n*, then no checkpoints will be triggered while *n* checkpoint attempts
are currently in flight. For the next checkpoint to be triggered, one checkpoint attempt
would need to finish or expire.
:return: The maximum number of concurrent checkpoint attempts.
"""
return self._j_checkpoint_config.getMaxConcurrentCheckpoints()
def set_max_concurrent_checkpoints(self, max_concurrent_checkpoints: int) -> 'CheckpointConfig':
"""
Sets the maximum number of checkpoint attempts that may be in progress at the same time.
If this value is *n*, then no checkpoints will be triggered while *n* checkpoint attempts
are currently in flight. For the next checkpoint to be triggered, one checkpoint attempt
would need to finish or expire.
:param max_concurrent_checkpoints: The maximum number of concurrent checkpoint attempts.
"""
self._j_checkpoint_config.setMaxConcurrentCheckpoints(max_concurrent_checkpoints)
return self
def is_fail_on_checkpointing_errors(self) -> bool:
"""
This determines the behaviour of tasks if there is an error in their local checkpointing.
        If this returns true, tasks will fail as a reaction. If this returns false, tasks will only
decline the failed checkpoint.
:return: ``True`` if failing on checkpointing errors, false otherwise.
"""
return self._j_checkpoint_config.isFailOnCheckpointingErrors()
def set_fail_on_checkpointing_errors(self,
fail_on_checkpointing_errors: bool) -> 'CheckpointConfig':
"""
Sets the expected behaviour for tasks in case that they encounter an error in their
checkpointing procedure. If this is set to true, the task will fail on checkpointing error.
        If this is set to false, the task will only decline the checkpoint and continue running.
The default is true.
Example:
::
>>> config.set_fail_on_checkpointing_errors(False)
:param fail_on_checkpointing_errors: ``True`` if failing on checkpointing errors,
false otherwise.
"""
self._j_checkpoint_config.setFailOnCheckpointingErrors(fail_on_checkpointing_errors)
return self
def get_tolerable_checkpoint_failure_number(self) -> int:
"""
Get the defined number of consecutive checkpoint failures that will be tolerated, before the
whole job is failed over.
:return: The maximum number of tolerated checkpoint failures.
"""
return self._j_checkpoint_config.getTolerableCheckpointFailureNumber()
def set_tolerable_checkpoint_failure_number(self,
tolerable_checkpoint_failure_number: int
) -> 'CheckpointConfig':
"""
This defines how many consecutive checkpoint failures will be tolerated, before the whole
job is failed over. The default value is `0`, which means no checkpoint failures will be
        tolerated, and the job will fail on the first reported checkpoint failure.
Example:
::
>>> config.set_tolerable_checkpoint_failure_number(2)
:param tolerable_checkpoint_failure_number: The maximum number of tolerated checkpoint
failures.
"""
self._j_checkpoint_config.setTolerableCheckpointFailureNumber(
tolerable_checkpoint_failure_number)
return self
def enable_externalized_checkpoints(
self,
cleanup_mode: 'ExternalizedCheckpointCleanup') -> 'CheckpointConfig':
"""
Sets the mode for externalized checkpoint clean-up. Externalized checkpoints will be enabled
automatically unless the mode is set to
:data:`ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS`.
Externalized checkpoints write their meta data out to persistent storage and are **not**
automatically cleaned up when the owning job fails or is suspended (terminating with job
status ``FAILED`` or ``SUSPENDED``). In this case, you have to manually clean up the
checkpoint state, both the meta data and actual program state.
The :class:`ExternalizedCheckpointCleanup` mode defines how an externalized checkpoint
should be cleaned up on job cancellation. If you choose to retain externalized checkpoints
on cancellation you have to handle checkpoint clean-up manually when you cancel the job as
well (terminating with job status ``CANCELED``).
The target directory for externalized checkpoints is configured via
``org.apache.flink.configuration.CheckpointingOptions#CHECKPOINTS_DIRECTORY``.
Example:
::
>>> config.enable_externalized_checkpoints(
... ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
:param cleanup_mode: Externalized checkpoint clean-up behaviour, the mode could be
:data:`ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION`,
:data:`ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION` or
:data:`ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS`
.. note:: Deprecated in 1.15. Use :func:`set_externalized_checkpoint_cleanup` instead.
"""
self._j_checkpoint_config.enableExternalizedCheckpoints(
ExternalizedCheckpointCleanup._to_j_externalized_checkpoint_cleanup(cleanup_mode))
return self
def set_externalized_checkpoint_cleanup(
self,
cleanup_mode: 'ExternalizedCheckpointCleanup') -> 'CheckpointConfig':
"""
Sets the mode for externalized checkpoint clean-up. Externalized checkpoints will be enabled
automatically unless the mode is set to
:data:`ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS`.
Externalized checkpoints write their meta data out to persistent storage and are **not**
automatically cleaned up when the owning job fails or is suspended (terminating with job
status ``FAILED`` or ``SUSPENDED``). In this case, you have to manually clean up the
checkpoint state, both the meta data and actual program state.
The :class:`ExternalizedCheckpointCleanup` mode defines how an externalized checkpoint
should be cleaned up on job cancellation. If you choose to retain externalized checkpoints
on cancellation you have to handle checkpoint clean-up manually when you cancel the job as
well (terminating with job status ``CANCELED``).
The target directory for externalized checkpoints is configured via
``org.apache.flink.configuration.CheckpointingOptions#CHECKPOINTS_DIRECTORY``.
Example:
::
>>> config.set_externalized_checkpoint_cleanup(
... ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)
:param cleanup_mode: Externalized checkpoint clean-up behaviour, the mode could be
:data:`ExternalizedCheckpointCleanup.DELETE_ON_CANCELLATION`,
:data:`ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION` or
:data:`ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS`
"""
self._j_checkpoint_config.setExternalizedCheckpointCleanup(
ExternalizedCheckpointCleanup._to_j_externalized_checkpoint_cleanup(cleanup_mode))
return self
def is_externalized_checkpoints_enabled(self) -> bool:
"""
Returns whether checkpoints should be persisted externally.
:return: ``True`` if checkpoints should be externalized, false otherwise.
"""
return self._j_checkpoint_config.isExternalizedCheckpointsEnabled()
def get_externalized_checkpoint_cleanup(self) -> Optional['ExternalizedCheckpointCleanup']:
"""
Returns the cleanup behaviour for externalized checkpoints.
:return: The cleanup behaviour for externalized checkpoints or ``None`` if none is
configured.
"""
cleanup_mode = self._j_checkpoint_config.getExternalizedCheckpointCleanup()
if cleanup_mode is None:
return None
else:
return ExternalizedCheckpointCleanup._from_j_externalized_checkpoint_cleanup(
cleanup_mode)
def is_unaligned_checkpoints_enabled(self) -> bool:
"""
Returns whether unaligned checkpoints are enabled.
:return: ``True`` if unaligned checkpoints are enabled.
"""
return self._j_checkpoint_config.isUnalignedCheckpointsEnabled()
def enable_unaligned_checkpoints(self, enabled: bool = True) -> 'CheckpointConfig':
"""
Enables unaligned checkpoints, which greatly reduce checkpointing times under backpressure.
Unaligned checkpoints contain data stored in buffers as part of the checkpoint state, which
allows checkpoint barriers to overtake these buffers. Thus, the checkpoint duration becomes
independent of the current throughput as checkpoint barriers are effectively not embedded
into the stream of data anymore.
Unaligned checkpoints can only be enabled if :func:`get_checkpointing_mode` is
:data:`CheckpointingMode.EXACTLY_ONCE`.
:param enabled: ``True`` if a checkpoints should be taken in unaligned mode.
"""
self._j_checkpoint_config.enableUnalignedCheckpoints(enabled)
return self
def disable_unaligned_checkpoints(self) -> 'CheckpointConfig':
"""
        Disables unaligned checkpoints.
Unaligned checkpoints contain data stored in buffers as part of the checkpoint state, which
allows checkpoint barriers to overtake these buffers. Thus, the checkpoint duration becomes
independent of the current throughput as checkpoint barriers are effectively not embedded
into the stream of data anymore.
Unaligned checkpoints can only be enabled if :func:`get_checkpointing_mode` is
:data:`CheckpointingMode.EXACTLY_ONCE`.
"""
self.enable_unaligned_checkpoints(False)
return self
def set_alignment_timeout(self, alignment_timeout: Duration) -> 'CheckpointConfig':
"""
Only relevant if :func:`enable_unaligned_checkpoints` is enabled.
        If ``alignment_timeout`` has a value equal to ``0``, checkpoints will always start
        unaligned. If ``alignment_timeout`` has a value greater than ``0``, checkpoints will start
        aligned. If, during checkpointing, the checkpoint start delay exceeds this
        ``alignment_timeout``, alignment times out and the checkpoint continues as an unaligned
        checkpoint.
:param alignment_timeout: The duration until the aligned checkpoint will be converted into
an unaligned checkpoint.
"""
self._j_checkpoint_config.setAlignmentTimeout(alignment_timeout._j_duration)
return self
def get_alignment_timeout(self) -> 'Duration':
"""
Returns the alignment timeout, as configured via :func:`set_alignment_timeout` or
``org.apache.flink.streaming.api.environment.ExecutionCheckpointingOptions#ALIGNMENT_TIMEOUT``.
:return: the alignment timeout.
"""
return Duration(self._j_checkpoint_config.getAlignmentTimeout())
def set_force_unaligned_checkpoints(
self,
force_unaligned_checkpoints: bool = True) -> 'CheckpointConfig':
"""
        Sets whether unaligned checkpoints are forced, even in the presence of currently
        non-checkpointable iteration feedback or custom partitioners.
:param force_unaligned_checkpoints: The flag to force unaligned checkpoints.
"""
self._j_checkpoint_config.setForceUnalignedCheckpoints(force_unaligned_checkpoints)
return self
def is_force_unaligned_checkpoints(self) -> 'bool':
"""
Checks whether unaligned checkpoints are forced, despite iteration feedback or custom
partitioners.
:return: True, if unaligned checkpoints are forced, false otherwise.
"""
return self._j_checkpoint_config.isForceUnalignedCheckpoints()
def set_checkpoint_storage(self, storage: CheckpointStorage) -> 'CheckpointConfig':
"""
        Checkpoint storage defines how state backends checkpoint their state for fault
tolerance in streaming applications. Various implementations store their checkpoints
in different fashions and have different requirements and availability guarantees.
For example, `JobManagerCheckpointStorage` stores checkpoints in the memory of the
JobManager. It is lightweight and without additional dependencies but is not highly
available and only supports small state sizes. This checkpoint storage policy is convenient
for local testing and development.
The `FileSystemCheckpointStorage` stores checkpoints in a filesystem. For systems like
        HDFS, NFS drives, S3, and GCS, this storage policy supports large state sizes, on the
        order of many terabytes, while providing a highly available foundation for stateful
applications. This checkpoint storage policy is recommended for most production deployments.
"""
self._j_checkpoint_config.setCheckpointStorage(storage._j_checkpoint_storage)
return self
def set_checkpoint_storage_dir(self, checkpoint_path: str) -> 'CheckpointConfig':
"""
Configures the application to write out checkpoint snapshots to the configured directory.
See `FileSystemCheckpointStorage` for more details on checkpointing to a file system.
"""
self._j_checkpoint_config.setCheckpointStorage(checkpoint_path)
return self
def get_checkpoint_storage(self) -> Optional[CheckpointStorage]:
"""
The checkpoint storage that has been configured for the Job, or None if
none has been set.
"""
j_storage = self._j_checkpoint_config.getCheckpointStorage()
if j_storage is None:
return None
else:
return _from_j_checkpoint_storage(j_storage)
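# A minimal usage sketch (assumes a ``StreamExecutionEnvironment`` named ``env``): tuning
# the checkpoint configuration obtained from the environment; the setters return self, so
# they can be chained.
#
#     env.enable_checkpointing(60 * 1000)
#     env.get_checkpoint_config() \
#         .set_checkpointing_mode(CheckpointingMode.EXACTLY_ONCE) \
#         .set_min_pause_between_checkpoints(30 * 1000) \
#         .set_checkpoint_timeout(10 * 60 * 1000) \
#         .set_externalized_checkpoint_cleanup(
#             ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION)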
class ExternalizedCheckpointCleanup(Enum):
"""
Cleanup behaviour for externalized checkpoints when the job is cancelled.
:data:`DELETE_ON_CANCELLATION`:
Delete externalized checkpoints on job cancellation.
All checkpoint state will be deleted when you cancel the owning
job, both the meta data and actual program state. Therefore, you
cannot resume from externalized checkpoints after the job has been
cancelled.
Note that checkpoint state is always kept if the job terminates
with state ``FAILED``.
:data:`RETAIN_ON_CANCELLATION`:
Retain externalized checkpoints on job cancellation.
All checkpoint state is kept when you cancel the owning job. You
have to manually delete both the checkpoint meta data and actual
program state after cancelling the job.
Note that checkpoint state is always kept if the job terminates
with state ``FAILED``.
:data:`NO_EXTERNALIZED_CHECKPOINTS`:
Externalized checkpoints are disabled completely.
"""
DELETE_ON_CANCELLATION = 0
RETAIN_ON_CANCELLATION = 1
NO_EXTERNALIZED_CHECKPOINTS = 2
@staticmethod
def _from_j_externalized_checkpoint_cleanup(j_cleanup_mode) \
-> 'ExternalizedCheckpointCleanup':
return ExternalizedCheckpointCleanup[j_cleanup_mode.name()]
def _to_j_externalized_checkpoint_cleanup(self):
gateway = get_gateway()
JExternalizedCheckpointCleanup = \
gateway.jvm.org.apache.flink.streaming.api.environment.CheckpointConfig \
.ExternalizedCheckpointCleanup
return getattr(JExternalizedCheckpointCleanup, self.name)
| 22,936 | 43.45155 | 103 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/state_backend.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABCMeta
from enum import Enum
from py4j.java_gateway import get_java_class
from typing import List, Optional
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import load_java_class
__all__ = [
'StateBackend',
'HashMapStateBackend',
'EmbeddedRocksDBStateBackend',
'MemoryStateBackend',
'FsStateBackend',
'RocksDBStateBackend',
'CustomStateBackend',
'PredefinedOptions']
def _from_j_state_backend(j_state_backend):
if j_state_backend is None:
return None
gateway = get_gateway()
JStateBackend = gateway.jvm.org.apache.flink.runtime.state.StateBackend
JHashMapStateBackend = gateway.jvm.org.apache.flink.runtime.state.hashmap.HashMapStateBackend
JEmbeddedRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state.\
EmbeddedRocksDBStateBackend
JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory.MemoryStateBackend
JFsStateBackend = gateway.jvm.org.apache.flink.runtime.state.filesystem.FsStateBackend
JRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state.RocksDBStateBackend
j_clz = j_state_backend.getClass()
if not get_java_class(JStateBackend).isAssignableFrom(j_clz):
raise TypeError("The input %s is not an instance of StateBackend." % j_state_backend)
if get_java_class(JHashMapStateBackend).isAssignableFrom(j_state_backend.getClass()):
        return HashMapStateBackend(j_hashmap_state_backend=j_state_backend)
elif get_java_class(JEmbeddedRocksDBStateBackend).isAssignableFrom(j_state_backend.getClass()):
return EmbeddedRocksDBStateBackend(j_embedded_rocks_db_state_backend=j_state_backend)
elif get_java_class(JMemoryStateBackend).isAssignableFrom(j_state_backend.getClass()):
return MemoryStateBackend(j_memory_state_backend=j_state_backend)
elif get_java_class(JFsStateBackend).isAssignableFrom(j_state_backend.getClass()):
return FsStateBackend(j_fs_state_backend=j_state_backend)
elif get_java_class(JRocksDBStateBackend).isAssignableFrom(j_state_backend.getClass()):
return RocksDBStateBackend(j_rocks_db_state_backend=j_state_backend)
else:
return CustomStateBackend(j_state_backend) # users' customized state backend
class StateBackend(object, metaclass=ABCMeta):
"""
A **State Backend** defines how the state of a streaming application is stored locally within
the cluster. Different state backends store their state in different fashions, and use different
data structures to hold the state of running applications.
For example, the :class:`HashMapStateBackend` keeps working state in the memory of the
TaskManager. The backend is lightweight and without additional dependencies.
    State checkpoints are typically stored in a filesystem (a replicated, highly-available
    filesystem such as `HDFS <https://hadoop.apache.org/>`_, `Ceph <https://ceph.com/>`_,
    `S3 <https://aws.amazon.com/documentation/s3/>`_,
    `GCS <https://cloud.google.com/storage/>`_, etc).
The :class:`EmbeddedRocksDBStateBackend` stores working state in an embedded
`RocksDB <http://rocksdb.org/>`_, instance and is able to scale working state to many
terrabytes in size, only limited by available disk space across all task managers.
**Raw Bytes Storage and Backends**
The :class:`StateBackend` creates services for *raw bytes storage* and for *keyed state*
and *operator state*.
    The `org.apache.flink.runtime.state.AbstractKeyedStateBackend` and
`org.apache.flink.runtime.state.OperatorStateBackend` created by this state backend define how
to hold the working state for keys and operators. They also define how to checkpoint that
state, frequently using the raw bytes storage (via the
`org.apache.flink.runtime.state.CheckpointStreamFactory`). However, it is also possible that
for example a keyed state backend simply implements the bridge to a key/value store, and that
it does not need to store anything in the raw byte storage upon a checkpoint.
**Serializability**
    State Backends need to be serializable (`java.io.Serializable`), because they are distributed
across parallel processes (for distributed execution) together with the streaming application
code.
Because of that, :class:`StateBackend` implementations are meant to be like *factories* that
create the proper states stores that provide access to the persistent storage and hold the
keyed- and operator state data structures. That way, the State Backend can be very lightweight
(contain only configurations) which makes it easier to be serializable.
**Thread Safety**
State backend implementations have to be thread-safe. Multiple threads may be creating
streams and keyed-/operator state backends concurrently.
"""
def __init__(self, j_state_backend):
self._j_state_backend = j_state_backend
class HashMapStateBackend(StateBackend):
"""
This state backend holds the working state in the memory (JVM heap) of the TaskManagers
and checkpoints based on the configured CheckpointStorage.
**State Size Considerations**
Working state is kept on the TaskManager heap. If a TaskManager executes multiple
tasks concurrently (if the TaskManager has multiple slots, or if slot-sharing is used)
then the aggregate state of all tasks needs to fit into that TaskManager's memory.
**Configuration**
As for all state backends, this backend can either be configured within the application (by
creating the backend with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the state backend was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
def __init__(self, j_hashmap_state_backend=None):
"""
        Creates a new HashMapStateBackend that keeps working state on the TaskManager heap and
        checkpoints it via the configured checkpoint storage.
Example:
::
>>> state_backend = HashMapStateBackend()
:param j_hashmap_state_backend: For internal use, please keep none.
"""
if j_hashmap_state_backend is None:
gateway = get_gateway()
JHashMapStateBackend = gateway.jvm.org.apache.flink.runtime.state.hashmap\
.HashMapStateBackend
j_hashmap_state_backend = JHashMapStateBackend()
super(HashMapStateBackend, self).__init__(j_hashmap_state_backend)
def __str__(self):
return self._j_state_backend.toString()
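# Illustrative sketch, not part of the Flink API: the configuration pattern described above, i.e.
# keep working state on the TaskManager heap and rely on periodic checkpoints for durability.
# Assumes a local PyFlink installation; ``_example_configure_hashmap_backend`` is purely
# illustrative.
def _example_configure_hashmap_backend():
    from pyflink.datastream import StreamExecutionEnvironment
    env = StreamExecutionEnvironment.get_execution_environment()
    # The backend object only carries configuration; the actual heap state tables are created on
    # the TaskManagers once the job runs.
    env.set_state_backend(HashMapStateBackend())
    env.enable_checkpointing(60000)  # snapshot the working state every 60 seconds
    return env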
class EmbeddedRocksDBStateBackend(StateBackend):
"""
A State Backend that stores its state in an embedded ``RocksDB`` instance. This state backend
can store very large state that exceeds memory and spills to local disk.
All key/value state (including windows) is stored in the key/value index of RocksDB.
For persistence against loss of machines, please configure a CheckpointStorage instance
for the Job.
The behavior of the RocksDB instances can be parametrized by setting RocksDB Options
using the methods :func:`set_predefined_options` and :func:`set_options`.
"""
def __init__(self,
enable_incremental_checkpointing=None,
j_embedded_rocks_db_state_backend=None):
"""
Creates a new :class:`EmbeddedRocksDBStateBackend` for storing local state.
Example:
::
>>> state_backend = EmbeddedRocksDBStateBackend()
:param enable_incremental_checkpointing: True if incremental checkpointing is enabled.
:param j_embedded_rocks_db_state_backend: For internal use, please keep none.
"""
if j_embedded_rocks_db_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JEmbeddedRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state \
.EmbeddedRocksDBStateBackend
if enable_incremental_checkpointing not in (None, True, False):
raise TypeError("Unsupported input for 'enable_incremental_checkpointing': %s, "
"the value of the parameter should be None or"
"True or False.")
if enable_incremental_checkpointing is None:
j_enable_incremental_checkpointing = JTernaryBoolean.UNDEFINED
elif enable_incremental_checkpointing is True:
j_enable_incremental_checkpointing = JTernaryBoolean.TRUE
else:
j_enable_incremental_checkpointing = JTernaryBoolean.FALSE
j_embedded_rocks_db_state_backend = \
JEmbeddedRocksDBStateBackend(j_enable_incremental_checkpointing)
super(EmbeddedRocksDBStateBackend, self).__init__(j_embedded_rocks_db_state_backend)
def set_db_storage_paths(self, *paths: str):
"""
Sets the directories in which the local RocksDB database puts its files (like SST and
metadata files). These directories do not need to be persistent, they can be ephemeral,
meaning that they are lost on a machine failure, because state in RocksDB is persisted
in checkpoints.
If nothing is configured, these directories default to the TaskManager's local
temporary file directories.
Each distinct state will be stored in one path, but when the state backend creates
multiple states, they will store their files on different paths.
Passing ``None`` to this function restores the default behavior, where the configured
temp directories will be used.
        :param paths: The paths across which the local RocksDB database files will be spread. This
parameter is optional.
"""
if len(paths) < 1:
self._j_state_backend.setDbStoragePath(None)
else:
gateway = get_gateway()
j_path_array = gateway.new_array(gateway.jvm.String, len(paths))
for i in range(0, len(paths)):
j_path_array[i] = paths[i]
self._j_state_backend.setDbStoragePaths(j_path_array)
def get_db_storage_paths(self) -> List[str]:
"""
Gets the configured local DB storage paths, or null, if none were configured.
Under these directories on the TaskManager, RocksDB stores its SST files and
        metadata files. These directories do not need to be persistent, they can be ephemeral,
meaning that they are lost on a machine failure, because state in RocksDB is persisted
in checkpoints.
If nothing is configured, these directories default to the TaskManager's local
temporary file directories.
:return: The list of configured local DB storage paths.
"""
return list(self._j_state_backend.getDbStoragePaths())
def is_incremental_checkpoints_enabled(self) -> bool:
"""
Gets whether incremental checkpoints are enabled for this state backend.
:return: True if incremental checkpoints are enabled, false otherwise.
"""
return self._j_state_backend.isIncrementalCheckpointsEnabled()
def set_predefined_options(self, options: 'PredefinedOptions'):
"""
Sets the predefined options for RocksDB.
        If user-configured options within ``RocksDBConfigurableOptions`` are set (through
flink-conf.yaml) or a user-defined options factory is set (via :func:`setOptions`),
then the options from the factory are applied on top of the here specified
predefined options and customized options.
Example:
::
>>> state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
:param options: The options to set (must not be null), see :class:`PredefinedOptions`.
"""
self._j_state_backend\
.setPredefinedOptions(options._to_j_predefined_options())
def get_predefined_options(self) -> 'PredefinedOptions':
"""
Gets the current predefined options for RocksDB.
The default options (if nothing was set via :func:`setPredefinedOptions`)
are :data:`PredefinedOptions.DEFAULT`.
        If user-configured options within ``RocksDBConfigurableOptions`` are set (through
flink-conf.yaml) or a user-defined options factory is set (via :func:`setOptions`),
then the options from the factory are applied on top of the predefined and customized
options.
.. seealso:: :func:`set_predefined_options`
:return: Current predefined options.
"""
j_predefined_options = self._j_state_backend.getPredefinedOptions()
return PredefinedOptions._from_j_predefined_options(j_predefined_options)
def set_options(self, options_factory_class_name: str):
"""
Sets ``org.rocksdb.Options`` for the RocksDB instances.
Because the options are not serializable and hold native code references,
they must be specified through a factory.
The options created by the factory here are applied on top of the pre-defined
options profile selected via :func:`set_predefined_options` and user-configured
options from configuration set through flink-conf.yaml with keys in
``RocksDBConfigurableOptions``.
:param options_factory_class_name: The fully-qualified class name of the options
factory in Java that lazily creates the RocksDB options.
The options factory must have a default constructor.
"""
gateway = get_gateway()
JOptionsFactory = gateway.jvm.org.apache.flink.contrib.streaming.state.RocksDBOptionsFactory
j_options_factory_clz = load_java_class(options_factory_class_name)
if not get_java_class(JOptionsFactory).isAssignableFrom(j_options_factory_clz):
raise ValueError("The input class does not implement RocksDBOptionsFactory.")
self._j_state_backend\
.setRocksDBOptions(j_options_factory_clz.newInstance())
def get_options(self) -> Optional[str]:
"""
Gets the fully-qualified class name of the options factory in Java that lazily creates
the RocksDB options.
:return: The fully-qualified class name of the options factory in Java.
"""
j_options_factory = self._j_state_backend.getRocksDBOptions()
if j_options_factory is not None:
return j_options_factory.getClass().getName()
else:
return None
def get_number_of_transfer_threads(self) -> int:
"""
Gets the number of threads used to transfer files while snapshotting/restoring.
:return: The number of threads used to transfer files while snapshotting/restoring.
"""
return self._j_state_backend.getNumberOfTransferThreads()
def set_number_of_transfer_threads(self, number_of_transfering_threads: int):
"""
Sets the number of threads used to transfer files while snapshotting/restoring.
:param number_of_transfering_threads: The number of threads used to transfer files while
snapshotting/restoring.
"""
self._j_state_backend\
.setNumberOfTransferThreads(number_of_transfering_threads)
def __str__(self):
return self._j_state_backend.toString()
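# Illustrative sketch, not part of the Flink API: a small tuning example for the methods defined
# above. The storage path is hypothetical; ``_example_tune_rocksdb_backend`` is purely
# illustrative.
def _example_tune_rocksdb_backend():
    backend = EmbeddedRocksDBStateBackend(enable_incremental_checkpointing=True)
    # Spread local SST and metadata files over an explicit (hypothetical) disk location.
    backend.set_db_storage_paths('/tmp/flink-rocksdb')
    # Apply the empirically determined option profile for spinning disks.
    backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
    assert backend.is_incremental_checkpoints_enabled()
    return backend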
class MemoryStateBackend(StateBackend):
"""
**IMPORTANT** `MemoryStateBackend` is deprecated in favor of `HashMapStateBackend` and
`JobManagerCheckpointStorage`. This change does not affect the runtime characteristics of your
Jobs and is simply an API change to help better communicate the ways Flink separates local state
storage from fault tolerance. Jobs can be upgraded without loss of state. If configuring
your state backend via the `StreamExecutionEnvironment` please make the following changes.
::
>> env.set_state_backend(HashMapStateBackend())
>> env.get_checkpoint_config().set_checkpoint_storage(JobManagerCheckpointStorage())
If you are configuring your state backend via the `flink-conf.yaml` please make the following
changes.
```
state.backend.type: hashmap
state.checkpoint-storage: jobmanager
```
This state backend holds the working state in the memory (JVM heap) of the TaskManagers.
The state backend checkpoints state directly to the JobManager's memory (hence the backend's
name), but the checkpoints will be persisted to a file system for high-availability setups and
savepoints. The MemoryStateBackend is consequently a FileSystem-based backend that can work
without a file system dependency in simple setups.
This state backend should be used only for experimentation, quick local setups,
or for streaming applications that have very small state: Because it requires checkpoints to
go through the JobManager's memory, larger state will occupy larger portions of the
JobManager's main memory, reducing operational stability.
For any other setup, the :class:`FsStateBackend` should be used. The :class:`FsStateBackend`
holds the working state on the TaskManagers in the same way, but checkpoints state directly to
    files rather than to the JobManager's memory, thus supporting large state sizes.
**State Size Considerations**
State checkpointing with this state backend is subject to the following conditions:
- Each individual state must not exceed the configured maximum state size
      (see :func:`get_max_state_size`).
- All state from one task (i.e., the sum of all operator states and keyed states from all
chained operators of the task) must not exceed what the RPC system supports, which is
      by default < 10 MB. That limit can be configured up, but that is typically not advised.
- The sum of all states in the application times all retained checkpoints must comfortably
fit into the JobManager's JVM heap space.
**Persistence Guarantees**
For the use cases where the state sizes can be handled by this backend, the backend does
    guarantee persistence for savepoints, externalized checkpoints (if configured), and checkpoints
(when high-availability is configured).
**Configuration**
As for all state backends, this backend can either be configured within the application (by
creating the backend with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the state backend was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
# The default maximal size that the snapshotted memory state may have (5 MiBytes).
DEFAULT_MAX_STATE_SIZE = 5 * 1024 * 1024
def __init__(self,
checkpoint_path=None,
savepoint_path=None,
max_state_size=None,
using_asynchronous_snapshots=None,
j_memory_state_backend=None):
"""
Creates a new MemoryStateBackend, setting optionally the paths to persist checkpoint
metadata and savepoints to, as well as configuring state thresholds and asynchronous
operations.
WARNING: Increasing the size of this value beyond the default value
(:data:`DEFAULT_MAX_STATE_SIZE`) should be done with care.
        The checkpointed state needs to be sent to the JobManager via limited size RPC messages,
        and the JobManager needs to be able to hold all aggregated state in its memory.
Example:
::
>>> state_backend = MemoryStateBackend()
:param checkpoint_path: The path to write checkpoint metadata to. If none, the value from
the runtime configuration will be used.
:param savepoint_path: The path to write savepoints to. If none, the value from
the runtime configuration will be used.
:param max_state_size: The maximal size of the serialized state. If none, the
:data:`DEFAULT_MAX_STATE_SIZE` will be used.
:param using_asynchronous_snapshots: Snapshots are now always asynchronous. This flag
has no effect anymore in this version.
:param j_memory_state_backend: For internal use, please keep none.
"""
if j_memory_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JMemoryStateBackend = gateway.jvm.org.apache.flink.runtime.state.memory\
.MemoryStateBackend
if using_asynchronous_snapshots is None:
j_asynchronous_snapshots = JTernaryBoolean.UNDEFINED
elif using_asynchronous_snapshots is True:
j_asynchronous_snapshots = JTernaryBoolean.TRUE
elif using_asynchronous_snapshots is False:
j_asynchronous_snapshots = JTernaryBoolean.FALSE
else:
raise TypeError("Unsupported input for 'using_asynchronous_snapshots': %s, "
"the value of the parameter should be None or"
"True or False.")
if max_state_size is None:
max_state_size = JMemoryStateBackend.DEFAULT_MAX_STATE_SIZE
j_memory_state_backend = JMemoryStateBackend(checkpoint_path,
savepoint_path,
max_state_size,
j_asynchronous_snapshots)
self._j_memory_state_backend = j_memory_state_backend
super(MemoryStateBackend, self).__init__(j_memory_state_backend)
def get_max_state_size(self) -> int:
"""
Gets the maximum size that an individual state can have, as configured in the
constructor (by default :data:`DEFAULT_MAX_STATE_SIZE`).
:return: The maximum size that an individual state can have.
"""
return self._j_memory_state_backend.getMaxStateSize()
def is_using_asynchronous_snapshots(self) -> bool:
"""
Gets whether the key/value data structures are asynchronously snapshotted.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.ASYNC_SNAPSHOTS``.
:return: True if the key/value data structures are asynchronously snapshotted,
false otherwise.
"""
return self._j_memory_state_backend.isUsingAsynchronousSnapshots()
def __str__(self):
return self._j_memory_state_backend.toString()
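# Illustrative sketch, not part of the Flink API: the legacy construction still works, with the
# documented migration path kept as comments. ``_example_legacy_memory_backend`` is purely
# illustrative.
def _example_legacy_memory_backend():
    # Legacy style: small state only, checkpoints travel through the JobManager's memory.
    backend = MemoryStateBackend(max_state_size=2 * 1024 * 1024)
    assert backend.get_max_state_size() == 2 * 1024 * 1024
    # Documented replacement (see the class docstring above):
    #   env.set_state_backend(HashMapStateBackend())
    #   env.get_checkpoint_config().set_checkpoint_storage(JobManagerCheckpointStorage())
    return backend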
class FsStateBackend(StateBackend):
"""
    **IMPORTANT** `FsStateBackend` is deprecated in favor of `HashMapStateBackend` and
`FileSystemCheckpointStorage`. This change does not affect the runtime characteristics
of your Jobs and is simply an API change to help better communicate the ways Flink separates
local state storage from fault tolerance. Jobs can be upgraded without loss of state. If
configuring your state backend via the `StreamExecutionEnvironment` please make the following
changes.
::
>> env.set_state_backend(HashMapStateBackend())
>> env.get_checkpoint_config().set_checkpoint_storage("hdfs://checkpoints")
If you are configuring your state backend via the `flink-conf.yaml` please set your state
backend type to `hashmap`.
This state backend holds the working state in the memory (JVM heap) of the TaskManagers.
The state backend checkpoints state as files to a file system (hence the backend's name).
Each checkpoint individually will store all its files in a subdirectory that includes the
checkpoint number, such as ``hdfs://namenode:port/flink-checkpoints/chk-17/``.
**State Size Considerations**
Working state is kept on the TaskManager heap. If a TaskManager executes multiple
tasks concurrently (if the TaskManager has multiple slots, or if slot-sharing is used)
then the aggregate state of all tasks needs to fit into that TaskManager's memory.
This state backend stores small state chunks directly with the metadata, to avoid creating
many small files. The threshold for that is configurable. When increasing this threshold, the
size of the checkpoint metadata increases. The checkpoint metadata of all retained completed
checkpoints needs to fit into the JobManager's heap memory. This is typically not a problem,
unless the threshold :func:`get_min_file_size_threshold` is increased significantly.
**Persistence Guarantees**
    Checkpoints from this state backend are as persistent and available as the filesystem that is
written to. If the file system is a persistent distributed file system, this state backend
supports highly available setups. The backend additionally supports savepoints and externalized
checkpoints.
**Configuration**
As for all state backends, this backend can either be configured within the application (by
creating the backend with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the state backend was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
def __init__(self,
checkpoint_directory_uri=None,
default_savepoint_directory_uri=None,
file_state_size_threshold=None,
write_buffer_size=None,
using_asynchronous_snapshots=None,
j_fs_state_backend=None):
"""
Creates a new state backend that stores its checkpoint data in the file system and location
defined by the given URI.
A file system for the file system scheme in the URI (e.g., 'file://', 'hdfs://', or
'S3://') must be accessible via ``org.apache.flink.core.fs.FileSystem.get(URI)``.
For a state backend targeting HDFS, this means that the URI must either specify the
authority (host and port), or that the Hadoop configuration that describes that information
must be in the classpath.
Example:
::
>>> state_backend = FsStateBackend("file://var/checkpoints/")
:param checkpoint_directory_uri: The path to write checkpoint metadata to, required.
:param default_savepoint_directory_uri: The path to write savepoints to. If none, the value
from the runtime configuration will be used, or
savepoint target locations need to be passed when
triggering a savepoint.
:param file_state_size_threshold: State below this size will be stored as part of the
metadata, rather than in files. If none, the value
configured in the runtime configuration will be used, or
the default value (1KB) if nothing is configured.
:param write_buffer_size: Write buffer size used to serialize state. If -1, the value
configured in the runtime configuration will be used, or the
default value (4KB) if nothing is configured.
:param using_asynchronous_snapshots: Snapshots are now always asynchronous. This flag
has no effect anymore in this version.
:param j_fs_state_backend: For internal use, please keep none.
"""
if j_fs_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JFsStateBackend = gateway.jvm.org.apache.flink.runtime.state.filesystem\
.FsStateBackend
JPath = gateway.jvm.org.apache.flink.core.fs.Path
if checkpoint_directory_uri is None:
raise ValueError("The parameter 'checkpoint_directory_uri' is required!")
j_checkpoint_directory_uri = JPath(checkpoint_directory_uri).toUri()
if default_savepoint_directory_uri is None:
j_default_savepoint_directory_uri = None
else:
j_default_savepoint_directory_uri = JPath(default_savepoint_directory_uri).toUri()
if file_state_size_threshold is None:
file_state_size_threshold = -1
if write_buffer_size is None:
write_buffer_size = -1
if using_asynchronous_snapshots is None:
j_asynchronous_snapshots = JTernaryBoolean.UNDEFINED
elif using_asynchronous_snapshots is True:
j_asynchronous_snapshots = JTernaryBoolean.TRUE
elif using_asynchronous_snapshots is False:
j_asynchronous_snapshots = JTernaryBoolean.FALSE
else:
raise TypeError("Unsupported input for 'using_asynchronous_snapshots': %s, "
"the value of the parameter should be None or"
"True or False.")
j_fs_state_backend = JFsStateBackend(j_checkpoint_directory_uri,
j_default_savepoint_directory_uri,
file_state_size_threshold,
write_buffer_size,
j_asynchronous_snapshots)
self._j_fs_state_backend = j_fs_state_backend
super(FsStateBackend, self).__init__(j_fs_state_backend)
def get_checkpoint_path(self) -> str:
"""
Gets the base directory where all the checkpoints are stored.
The job-specific checkpoint directory is created inside this directory.
:return: The base directory for checkpoints.
"""
return self._j_fs_state_backend.getCheckpointPath().toString()
def get_min_file_size_threshold(self) -> int:
"""
Gets the threshold below which state is stored as part of the metadata, rather than in
files. This threshold ensures that the backend does not create a large amount of very
small files, where potentially the file pointers are larger than the state itself.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.FS_SMALL_FILE_THRESHOLD``.
:return: The file size threshold, in bytes.
"""
return self._j_fs_state_backend.getMinFileSizeThreshold()
def is_using_asynchronous_snapshots(self) -> bool:
"""
Gets whether the key/value data structures are asynchronously snapshotted.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.ASYNC_SNAPSHOTS``.
:return: True if the key/value data structures are asynchronously snapshotted,
false otherwise.
"""
return self._j_fs_state_backend.isUsingAsynchronousSnapshots()
def get_write_buffer_size(self) -> int:
"""
Gets the write buffer size for created checkpoint stream.
If not explicitly configured, this is the default value of
``org.apache.flink.configuration.CheckpointingOptions.FS_WRITE_BUFFER_SIZE``.
:return: The write buffer size, in bytes.
"""
return self._j_fs_state_backend.getWriteBufferSize()
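# Illustrative sketch, not part of the Flink API: FsStateBackend is deprecated as noted above but
# still constructible. The checkpoint URI is hypothetical; ``_example_legacy_fs_backend`` is
# purely illustrative.
def _example_legacy_fs_backend():
    backend = FsStateBackend(
        checkpoint_directory_uri="file:///tmp/flink-checkpoints",  # hypothetical location
        file_state_size_threshold=1024)                            # inline state below 1 KiB
    # Read the effective settings back through the getters defined above.
    print(backend.get_checkpoint_path(), backend.get_min_file_size_threshold())
    return backend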
class RocksDBStateBackend(StateBackend):
"""
**IMPORTANT** `RocksDBStateBackend` is deprecated in favor of `EmbeddedRocksDBStateBackend`
and `FileSystemCheckpointStorage`. This change does not affect the runtime characteristics of
your Jobs and is simply an API change to help better communicate the ways Flink separates
local state storage from fault tolerance. Jobs can be upgraded without loss of state. If
configuring your state backend via the `StreamExecutionEnvironment` please make the following
changes.
::
>> env.set_state_backend(EmbeddedRocksDBStateBackend())
>> env.get_checkpoint_config().set_checkpoint_storage("hdfs://checkpoints")
If you are configuring your state backend via the `flink-conf.yaml` no changes are required.
A State Backend that stores its state in ``RocksDB``. This state backend can
store very large state that exceeds memory and spills to disk.
All key/value state (including windows) is stored in the key/value index of RocksDB.
For persistence against loss of machines, checkpoints take a snapshot of the
RocksDB database, and persist that snapshot in a file system (by default) or
another configurable state backend.
The behavior of the RocksDB instances can be parametrized by setting RocksDB Options
using the methods :func:`set_predefined_options` and :func:`set_options`.
"""
def __init__(self,
checkpoint_data_uri=None,
enable_incremental_checkpointing=None,
checkpoint_stream_backend=None,
j_rocks_db_state_backend=None):
"""
Creates a new :class:`RocksDBStateBackend` that stores its checkpoint data in the given
state backend or the location of given URI.
If using state backend, typically, one would supply a filesystem or database state backend
here where the snapshots from RocksDB would be stored.
If using URI, a state backend that stores checkpoints in HDFS or S3 must specify the file
system host and port in the URI, or have the Hadoop configuration that describes the file
system (host / high-availability group / possibly credentials) either referenced from the
Flink config, or included in the classpath.
Example:
::
>>> state_backend = RocksDBStateBackend("file://var/checkpoints/")
:param checkpoint_data_uri: The URI describing the filesystem and path to the checkpoint
data directory.
:param enable_incremental_checkpointing: True if incremental checkpointing is enabled.
:param checkpoint_stream_backend: The backend write the checkpoint streams to.
:param j_rocks_db_state_backend: For internal use, please keep none.
"""
if j_rocks_db_state_backend is None:
gateway = get_gateway()
JTernaryBoolean = gateway.jvm.org.apache.flink.util.TernaryBoolean
JRocksDBStateBackend = gateway.jvm.org.apache.flink.contrib.streaming.state \
.RocksDBStateBackend
if enable_incremental_checkpointing not in (None, True, False):
raise TypeError("Unsupported input for 'enable_incremental_checkpointing': %s, "
"the value of the parameter should be None or"
"True or False.")
if checkpoint_data_uri is not None:
if enable_incremental_checkpointing is None:
j_rocks_db_state_backend = JRocksDBStateBackend(checkpoint_data_uri)
else:
j_rocks_db_state_backend = \
JRocksDBStateBackend(checkpoint_data_uri, enable_incremental_checkpointing)
elif isinstance(checkpoint_stream_backend, StateBackend):
if enable_incremental_checkpointing is None:
j_enable_incremental_checkpointing = JTernaryBoolean.UNDEFINED
elif enable_incremental_checkpointing is True:
j_enable_incremental_checkpointing = JTernaryBoolean.TRUE
else:
j_enable_incremental_checkpointing = JTernaryBoolean.FALSE
j_rocks_db_state_backend = \
JRocksDBStateBackend(checkpoint_stream_backend._j_state_backend,
j_enable_incremental_checkpointing)
self._j_rocks_db_state_backend = j_rocks_db_state_backend
super(RocksDBStateBackend, self).__init__(j_rocks_db_state_backend)
def get_checkpoint_backend(self):
"""
Gets the state backend that this RocksDB state backend uses to persist
its bytes to.
This RocksDB state backend only implements the RocksDB specific parts, it
relies on the 'CheckpointBackend' to persist the checkpoint and savepoint bytes
streams.
:return: The state backend to persist the checkpoint and savepoint bytes streams.
"""
j_state_backend = self._j_rocks_db_state_backend.getCheckpointBackend()
return _from_j_state_backend(j_state_backend)
def set_db_storage_paths(self, *paths: str):
"""
Sets the directories in which the local RocksDB database puts its files (like SST and
metadata files). These directories do not need to be persistent, they can be ephemeral,
meaning that they are lost on a machine failure, because state in RocksDB is persisted
in checkpoints.
If nothing is configured, these directories default to the TaskManager's local
temporary file directories.
Each distinct state will be stored in one path, but when the state backend creates
multiple states, they will store their files on different paths.
Passing ``None`` to this function restores the default behavior, where the configured
temp directories will be used.
        :param paths: The paths across which the local RocksDB database files will be spread. This
parameter is optional.
"""
if len(paths) < 1:
self._j_rocks_db_state_backend.setDbStoragePath(None)
else:
gateway = get_gateway()
j_path_array = gateway.new_array(gateway.jvm.String, len(paths))
for i in range(0, len(paths)):
j_path_array[i] = paths[i]
self._j_rocks_db_state_backend.setDbStoragePaths(j_path_array)
def get_db_storage_paths(self) -> List[str]:
"""
Gets the configured local DB storage paths, or null, if none were configured.
Under these directories on the TaskManager, RocksDB stores its SST files and
        metadata files. These directories do not need to be persistent, they can be ephemeral,
meaning that they are lost on a machine failure, because state in RocksDB is persisted
in checkpoints.
If nothing is configured, these directories default to the TaskManager's local
temporary file directories.
:return: The list of configured local DB storage paths.
"""
return list(self._j_rocks_db_state_backend.getDbStoragePaths())
def is_incremental_checkpoints_enabled(self) -> bool:
"""
Gets whether incremental checkpoints are enabled for this state backend.
:return: True if incremental checkpoints are enabled, false otherwise.
"""
return self._j_rocks_db_state_backend.isIncrementalCheckpointsEnabled()
def set_predefined_options(self, options: 'PredefinedOptions'):
"""
Sets the predefined options for RocksDB.
        If user-configured options within ``RocksDBConfigurableOptions`` are set (through
flink-conf.yaml) or a user-defined options factory is set (via :func:`setOptions`),
then the options from the factory are applied on top of the here specified
predefined options and customized options.
Example:
::
>>> state_backend.set_predefined_options(PredefinedOptions.SPINNING_DISK_OPTIMIZED)
:param options: The options to set (must not be null), see :class:`PredefinedOptions`.
"""
self._j_rocks_db_state_backend.setPredefinedOptions(options._to_j_predefined_options())
def get_predefined_options(self) -> 'PredefinedOptions':
"""
Gets the current predefined options for RocksDB.
The default options (if nothing was set via :func:`setPredefinedOptions`)
are :data:`PredefinedOptions.DEFAULT`.
        If user-configured options within ``RocksDBConfigurableOptions`` are set (through
flink-conf.yaml) or a user-defined options factory is set (via :func:`setOptions`),
then the options from the factory are applied on top of the predefined and customized
options.
.. seealso:: :func:`set_predefined_options`
:return: Current predefined options.
"""
j_predefined_options = self._j_rocks_db_state_backend.getPredefinedOptions()
return PredefinedOptions._from_j_predefined_options(j_predefined_options)
def set_options(self, options_factory_class_name: str):
"""
Sets ``org.rocksdb.Options`` for the RocksDB instances.
Because the options are not serializable and hold native code references,
they must be specified through a factory.
The options created by the factory here are applied on top of the pre-defined
options profile selected via :func:`set_predefined_options`.
If the pre-defined options profile is the default (:data:`PredefinedOptions.DEFAULT`),
then the factory fully controls the RocksDB options.
:param options_factory_class_name: The fully-qualified class name of the options
factory in Java that lazily creates the RocksDB options.
The options factory must have a default constructor.
"""
gateway = get_gateway()
JOptionsFactory = gateway.jvm.org.apache.flink.contrib.streaming.state.RocksDBOptionsFactory
j_options_factory_clz = load_java_class(options_factory_class_name)
if not get_java_class(JOptionsFactory).isAssignableFrom(j_options_factory_clz):
raise ValueError("The input class does not implement RocksDBOptionsFactory.")
self._j_rocks_db_state_backend.setRocksDBOptions(j_options_factory_clz.newInstance())
def get_options(self) -> Optional[str]:
"""
Gets the fully-qualified class name of the options factory in Java that lazily creates
the RocksDB options.
:return: The fully-qualified class name of the options factory in Java.
"""
j_options_factory = self._j_rocks_db_state_backend.getRocksDBOptions()
if j_options_factory is not None:
return j_options_factory.getClass().getName()
else:
return None
def get_number_of_transfering_threads(self) -> int:
"""
Gets the number of threads used to transfer files while snapshotting/restoring.
:return: The number of threads used to transfer files while snapshotting/restoring.
"""
return self._j_rocks_db_state_backend.getNumberOfTransferingThreads()
def set_number_of_transfering_threads(self, number_of_transfering_threads: int):
"""
Sets the number of threads used to transfer files while snapshotting/restoring.
:param number_of_transfering_threads: The number of threads used to transfer files while
snapshotting/restoring.
"""
self._j_rocks_db_state_backend.setNumberOfTransferingThreads(number_of_transfering_threads)
def __str__(self):
return self._j_rocks_db_state_backend.toString()
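# Illustrative sketch, not part of the Flink API: the deprecated RocksDBStateBackend wraps a
# checkpoint stream backend internally; reading it back yields one of the wrappers above. The URI
# is hypothetical; ``_example_legacy_rocksdb_backend`` is purely illustrative.
def _example_legacy_rocksdb_backend():
    backend = RocksDBStateBackend("file:///tmp/flink-checkpoints",  # hypothetical URI
                                  enable_incremental_checkpointing=True)
    # get_checkpoint_backend() dispatches through _from_j_state_backend(), typically yielding an
    # FsStateBackend wrapper for a URI-based construction.
    checkpoint_backend = backend.get_checkpoint_backend()
    return backend, checkpoint_backend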
class PredefinedOptions(Enum):
"""
The :class:`PredefinedOptions` are configuration settings for the :class:`RocksDBStateBackend`.
The various pre-defined choices are configurations that have been empirically
determined to be beneficial for performance under different settings.
Some of these settings are based on experiments by the Flink community, some follow
guides from the RocksDB project. If some configurations should be enabled unconditionally, they
are not included in any of the pre-defined options. See the documentation for
RocksDBResourceContainer in the Java API for further details. Note that setUseFsync(false) is
set by default irrespective of the :class:`PredefinedOptions` setting. Because Flink does not
rely on RocksDB data on disk for recovery, there is no need to sync data to stable storage.
:data:`DEFAULT`:
Default options for all settings. No additional options are set.
:data:`SPINNING_DISK_OPTIMIZED`:
Pre-defined options for regular spinning hard disks.
This constant configures RocksDB with some options that lead empirically
to better performance when the machines executing the system use
regular spinning hard disks.
The following options are set:
- setCompactionStyle(CompactionStyle.LEVEL)
- setLevelCompactionDynamicLevelBytes(true)
- setMaxBackgroundJobs(4)
- setMaxOpenFiles(-1)
:data:`SPINNING_DISK_OPTIMIZED_HIGH_MEM`:
Pre-defined options for better performance on regular spinning hard disks,
at the cost of a higher memory consumption.
.. note::
These settings will cause RocksDB to consume a lot of memory for
        block caching and compactions. If you experience out-of-memory problems related to
        RocksDB, consider switching back to :data:`SPINNING_DISK_OPTIMIZED`.
The following options are set:
- BlockBasedTableConfig.setBlockCacheSize(256 MBytes)
- BlockBasedTableConfig.setBlockSize(128 KBytes)
- BlockBasedTableConfig.setFilterPolicy(BloomFilter(
`BLOOM_FILTER_BITS_PER_KEY`,
`BLOOM_FILTER_BLOCK_BASED_MODE`)
- setLevelCompactionDynamicLevelBytes(true)
- setMaxBackgroundJobs(4)
- setMaxBytesForLevelBase(1 GByte)
- setMaxOpenFiles(-1)
- setMaxWriteBufferNumber(4)
- setMinWriteBufferNumberToMerge(3)
- setTargetFileSizeBase(256 MBytes)
- setWriteBufferSize(64 MBytes)
The BLOOM_FILTER_BITS_PER_KEY and BLOOM_FILTER_BLOCK_BASED_MODE options are set via
`state.backend.rocksdb.bloom-filter.bits-per-key` and
`state.backend.rocksdb.bloom-filter.block-based-mode`, respectively.
:data:`FLASH_SSD_OPTIMIZED`:
Pre-defined options for Flash SSDs.
This constant configures RocksDB with some options that lead empirically
to better performance when the machines executing the system use SSDs.
The following options are set:
- setMaxBackgroundJobs(4)
- setMaxOpenFiles(-1)
"""
DEFAULT = 0
SPINNING_DISK_OPTIMIZED = 1
SPINNING_DISK_OPTIMIZED_HIGH_MEM = 2
FLASH_SSD_OPTIMIZED = 3
@staticmethod
def _from_j_predefined_options(j_predefined_options) -> 'PredefinedOptions':
return PredefinedOptions[j_predefined_options.name()]
def _to_j_predefined_options(self):
gateway = get_gateway()
JPredefinedOptions = gateway.jvm.org.apache.flink.contrib.streaming.state.PredefinedOptions
return getattr(JPredefinedOptions, self.name)
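# Illustrative sketch, not part of the Flink API: how the enum above is used in practice, i.e. set
# a profile on a backend and read it back through the Java enum mapping.
# ``_example_predefined_options_round_trip`` is purely illustrative.
def _example_predefined_options_round_trip():
    backend = EmbeddedRocksDBStateBackend()
    backend.set_predefined_options(PredefinedOptions.FLASH_SSD_OPTIMIZED)
    # get_predefined_options() converts back via _from_j_predefined_options().
    assert backend.get_predefined_options() == PredefinedOptions.FLASH_SSD_OPTIMIZED
    return backend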
class CustomStateBackend(StateBackend):
"""
A wrapper of customized java state backend.
"""
def __init__(self, j_custom_state_backend):
super(CustomStateBackend, self).__init__(j_custom_state_backend)
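# Illustrative sketch, not part of the Flink API: CustomStateBackend only wraps an
# already-instantiated Java backend. Assumes a user-provided Java class with a default constructor
# on the classpath; the class name below is hypothetical, as is ``_example_wrap_custom_backend``.
def _example_wrap_custom_backend():
    j_backend_clz = load_java_class("com.example.MyStateBackend")  # hypothetical class name
    return CustomStateBackend(j_backend_clz.newInstance())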
| 50,081 | 46.561254 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/stream_execution_environment.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import tempfile
from typing import List, Any, Optional, cast
from py4j.java_gateway import JavaObject
from pyflink.common import Configuration, WatermarkStrategy
from pyflink.common.execution_config import ExecutionConfig
from pyflink.common.io import InputFormat
from pyflink.common.job_client import JobClient
from pyflink.common.job_execution_result import JobExecutionResult
from pyflink.common.restart_strategy import RestartStrategies, RestartStrategyConfiguration
from pyflink.common.typeinfo import TypeInformation, Types
from pyflink.datastream import SlotSharingGroup
from pyflink.datastream.checkpoint_config import CheckpointConfig
from pyflink.datastream.checkpointing_mode import CheckpointingMode
from pyflink.datastream.connectors import Source
from pyflink.datastream.data_stream import DataStream
from pyflink.datastream.execution_mode import RuntimeExecutionMode
from pyflink.datastream.functions import SourceFunction
from pyflink.datastream.state_backend import _from_j_state_backend, StateBackend
from pyflink.datastream.time_characteristic import TimeCharacteristic
from pyflink.datastream.utils import ResultTypeQueryable
from pyflink.java_gateway import get_gateway
from pyflink.serializers import PickleSerializer
from pyflink.util.java_utils import load_java_class, add_jars_to_context_class_loader, \
invoke_method, get_field_value, is_local_deployment, get_j_env_configuration
__all__ = ['StreamExecutionEnvironment']
class StreamExecutionEnvironment(object):
"""
The StreamExecutionEnvironment is the context in which a streaming program is executed. A
*LocalStreamEnvironment* will cause execution in the attached JVM, a
*RemoteStreamEnvironment* will cause execution on a remote setup.
The environment provides methods to control the job execution (such as setting the parallelism
or the fault tolerance/checkpointing parameters) and to interact with the outside world (data
access).
"""
def __init__(self, j_stream_execution_environment, serializer=PickleSerializer()):
self._j_stream_execution_environment = j_stream_execution_environment
self.serializer = serializer
self._open()
def get_config(self) -> ExecutionConfig:
"""
Gets the config object.
:return: The :class:`~pyflink.common.ExecutionConfig` object.
"""
return ExecutionConfig(self._j_stream_execution_environment.getConfig())
def set_parallelism(self, parallelism: int) -> 'StreamExecutionEnvironment':
"""
Sets the parallelism for operations executed through this environment.
Setting a parallelism of x here will cause all operators (such as map,
batchReduce) to run with x parallel instances. This method overrides the
default parallelism for this environment. The
*LocalStreamEnvironment* uses by default a value equal to the
number of hardware contexts (CPU cores / threads). When executing the
program via the command line client from a JAR file, the default degree
of parallelism is the one configured for that setup.
:param parallelism: The parallelism.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setParallelism(parallelism)
return self
def set_max_parallelism(self, max_parallelism: int) -> 'StreamExecutionEnvironment':
"""
Sets the maximum degree of parallelism defined for the program. The upper limit (inclusive)
is 32768.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:param max_parallelism: Maximum degree of parallelism to be used for the program,
with 0 < maxParallelism <= 2^15.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setMaxParallelism(max_parallelism)
return self
def register_slot_sharing_group(self, slot_sharing_group: SlotSharingGroup) \
-> 'StreamExecutionEnvironment':
"""
Register a slot sharing group with its resource spec.
Note that a slot sharing group hints the scheduler that the grouped operators CAN be
        deployed into a shared slot. There's no guarantee that the scheduler always deploys the
        grouped operators together. In cases where grouped operators are deployed into separate
        slots, the slot resources will be derived from the specified group requirements.
:param slot_sharing_group: Which contains name and its resource spec.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.registerSlotSharingGroup(
slot_sharing_group.get_java_slot_sharing_group())
return self
def get_parallelism(self) -> int:
"""
Gets the parallelism with which operation are executed by default.
Operations can individually override this value to use a specific
parallelism.
:return: The parallelism used by operations, unless they override that value.
"""
return self._j_stream_execution_environment.getParallelism()
def get_max_parallelism(self) -> int:
"""
Gets the maximum degree of parallelism defined for the program.
The maximum degree of parallelism specifies the upper limit for dynamic scaling. It also
defines the number of key groups used for partitioned state.
:return: Maximum degree of parallelism.
"""
return self._j_stream_execution_environment.getMaxParallelism()
def set_runtime_mode(self, execution_mode: RuntimeExecutionMode):
"""
Sets the runtime execution mode for the application
:class:`~pyflink.datastream.execution_mode.RuntimeExecutionMode`. This
is equivalent to setting the `execution.runtime-mode` in your application's
configuration file.
We recommend users to NOT use this method but set the `execution.runtime-mode` using
the command-line when submitting the application. Keeping the application code
configuration-free allows for more flexibility as the same application will be able to be
executed in any execution mode.
:param execution_mode: The desired execution mode.
:return: The execution environment of your application.
.. versionadded:: 1.13.0
"""
return self._j_stream_execution_environment.setRuntimeMode(
execution_mode._to_j_execution_mode())
def set_buffer_timeout(self, timeout_millis: int) -> 'StreamExecutionEnvironment':
"""
Sets the maximum time frequency (milliseconds) for the flushing of the
output buffers. By default the output buffers flush frequently to provide
low latency and to aid smooth developer experience. Setting the parameter
can result in three logical modes:
- A positive integer triggers flushing periodically by that integer
- 0 triggers flushing after every record thus minimizing latency
- -1 triggers flushing only when the output buffer is full thus maximizing throughput
:param timeout_millis: The maximum time between two output flushes.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setBufferTimeout(timeout_millis)
return self
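    # Illustrative usage, mirroring the three documented modes of set_buffer_timeout above:
    #   >>> env.set_buffer_timeout(100)   # flush at most every 100 ms
    #   >>> env.set_buffer_timeout(0)     # flush after every record (lowest latency)
    #   >>> env.set_buffer_timeout(-1)    # flush only when buffers are full (highest throughput)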
def get_buffer_timeout(self) -> int:
"""
Gets the maximum time frequency (milliseconds) for the flushing of the
output buffers. For clarification on the extremal values see
:func:`set_buffer_timeout`.
:return: The timeout of the buffer.
"""
return self._j_stream_execution_environment.getBufferTimeout()
def disable_operator_chaining(self) -> 'StreamExecutionEnvironment':
"""
Disables operator chaining for streaming operators. Operator chaining
allows non-shuffle operations to be co-located in the same thread fully
avoiding serialization and de-serialization.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.disableOperatorChaining()
return self
def is_chaining_enabled(self) -> bool:
"""
Returns whether operator chaining is enabled.
:return: True if chaining is enabled, false otherwise.
"""
return self._j_stream_execution_environment.isChainingEnabled()
def is_chaining_of_operators_with_different_max_parallelism_enabled(self) -> bool:
"""
Returns whether operators that have a different max parallelism can be chained.
:return: True if chaining is enabled, false otherwise
"""
return self._j_stream_execution_environment\
.isChainingOfOperatorsWithDifferentMaxParallelismEnabled()
def get_checkpoint_config(self) -> CheckpointConfig:
"""
Gets the checkpoint config, which defines values like checkpoint interval, delay between
checkpoints, etc.
:return: The :class:`~pyflink.datastream.CheckpointConfig`.
"""
j_checkpoint_config = self._j_stream_execution_environment.getCheckpointConfig()
return CheckpointConfig(j_checkpoint_config)
def enable_checkpointing(self, interval: int, mode: CheckpointingMode = None) \
-> 'StreamExecutionEnvironment':
"""
Enables checkpointing for the streaming job. The distributed state of the streaming
dataflow will be periodically snapshotted. In case of a failure, the streaming
dataflow will be restarted from the latest completed checkpoint.
The job draws checkpoints periodically, in the given interval. The system uses the
given :class:`~pyflink.datastream.CheckpointingMode` for the checkpointing ("exactly once"
vs "at least once"). The state will be stored in the configured state backend.
.. note::
            Checkpointing iterative streaming dataflows is not properly supported at
the moment. For that reason, iterative jobs will not be started if used
with enabled checkpointing.
Example:
::
>>> env.enable_checkpointing(300000, CheckpointingMode.AT_LEAST_ONCE)
:param interval: Time interval between state checkpoints in milliseconds.
:param mode: The checkpointing mode, selecting between "exactly once" and "at least once"
guaranteed.
:return: This object.
"""
if mode is None:
self._j_stream_execution_environment = \
self._j_stream_execution_environment.enableCheckpointing(interval)
else:
j_checkpointing_mode = CheckpointingMode._to_j_checkpointing_mode(mode)
self._j_stream_execution_environment.enableCheckpointing(
interval,
j_checkpointing_mode)
return self
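    # Illustrative usage, mirroring the docstring above: enable checkpointing and read the
    # effective settings back through the getters that follow.
    #   >>> env.enable_checkpointing(30000, CheckpointingMode.EXACTLY_ONCE)
    #   >>> env.get_checkpoint_interval()   # 30000
    #   >>> env.get_checkpointing_mode()    # CheckpointingMode.EXACTLY_ONCE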
def get_checkpoint_interval(self) -> int:
"""
Returns the checkpointing interval or -1 if checkpointing is disabled.
Shorthand for get_checkpoint_config().get_checkpoint_interval().
:return: The checkpointing interval or -1.
"""
return self._j_stream_execution_environment.getCheckpointInterval()
def get_checkpointing_mode(self) -> CheckpointingMode:
"""
Returns the checkpointing mode (exactly-once vs. at-least-once).
Shorthand for get_checkpoint_config().get_checkpointing_mode().
:return: The :class:`~pyflink.datastream.CheckpointingMode`.
"""
j_checkpointing_mode = self._j_stream_execution_environment.getCheckpointingMode()
return CheckpointingMode._from_j_checkpointing_mode(j_checkpointing_mode)
def get_state_backend(self) -> StateBackend:
"""
Gets the state backend that defines how to store and checkpoint state.
.. seealso:: :func:`set_state_backend`
:return: The :class:`StateBackend`.
"""
j_state_backend = self._j_stream_execution_environment.getStateBackend()
return _from_j_state_backend(j_state_backend)
def set_state_backend(self, state_backend: StateBackend) -> 'StreamExecutionEnvironment':
"""
Sets the state backend that describes how to store and checkpoint operator state. It
defines both which data structures hold state during execution (for example hash tables,
        RocksDB, or other data stores) as well as where checkpointed data will be persisted.
        The :class:`~pyflink.datastream.MemoryStateBackend` for example maintains the state in heap
        memory, as objects. It is lightweight without extra dependencies, but can checkpoint only
        small states (some counters).
        In contrast, the :class:`~pyflink.datastream.FsStateBackend` stores checkpoints of the state
        (also maintained as heap objects) in files. When using a replicated file system (like HDFS,
        S3, Alluxio, etc.) this will guarantee that state is not lost upon failures of
        individual nodes and that the streaming program can be executed in a highly available and
        strongly consistent manner (assuming that Flink is run in high-availability mode).
        The built-in state backends include:
:class:`~pyflink.datastream.MemoryStateBackend`,
:class:`~pyflink.datastream.FsStateBackend`
and :class:`~pyflink.datastream.RocksDBStateBackend`.
.. seealso:: :func:`get_state_backend`
Example:
::
>>> env.set_state_backend(EmbeddedRocksDBStateBackend())
:param state_backend: The :class:`StateBackend`.
:return: This object.
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.setStateBackend(state_backend._j_state_backend)
return self
def enable_changelog_state_backend(self, enabled: bool) -> 'StreamExecutionEnvironment':
"""
Enable the change log for current state backend. This change log allows operators to persist
state changes in a very fine-grained manner. Currently, the change log only applies to keyed
state, so non-keyed operator state and channel state are persisted as usual. The 'state'
here refers to 'keyed state'. Details are as follows:
* Stateful operators write the state changes to that log (logging the state), in addition \
to applying them to the state tables in RocksDB or the in-mem Hashtable.
* An operator can acknowledge a checkpoint as soon as the changes in the log have reached \
the durable checkpoint storage.
* The state tables are persisted periodically, independent of the checkpoints. We call \
this the materialization of the state on the checkpoint storage.
* Once the state is materialized on checkpoint storage, the state changelog can be \
truncated to the corresponding point.
        It establishes a way to drastically reduce the checkpoint interval for streaming
        applications across state backends. For more details please check FLIP-158.
If this method is not called explicitly, it means no preference for enabling the change
log. Configs for change log enabling will override in different config levels
(job/local/cluster).
.. seealso:: :func:`is_changelog_state_backend_enabled`
        :param enabled: True to explicitly enable the change log for the state backend, false to
                        explicitly disable it.
:return: This object.
.. versionadded:: 1.14.0
"""
self._j_stream_execution_environment = \
self._j_stream_execution_environment.enableChangelogStateBackend(enabled)
return self
def is_changelog_state_backend_enabled(self) -> Optional[bool]:
"""
Gets the enable status of change log for state backend.
.. seealso:: :func:`enable_changelog_state_backend`
:return: An :class:`Optional[bool]` for the enable status of change log for state backend.
Could be None if user never specify this by calling
:func:`enable_changelog_state_backend`.
.. versionadded:: 1.14.0
"""
j_ternary_boolean = self._j_stream_execution_environment.isChangelogStateBackendEnabled()
return j_ternary_boolean.getAsBoolean()
def set_default_savepoint_directory(self, directory: str) -> 'StreamExecutionEnvironment':
"""
Sets the default savepoint directory, where savepoints will be written to if none
is explicitly provided when triggered.
Example:
::
>>> env.set_default_savepoint_directory("hdfs://savepoints")
        :param directory: The savepoint directory.
:return: This object.
"""
self._j_stream_execution_environment.setDefaultSavepointDirectory(directory)
return self
def get_default_savepoint_directory(self) -> Optional[str]:
"""
Gets the default savepoint directory for this Job.
"""
j_path = self._j_stream_execution_environment.getDefaultSavepointDirectory()
if j_path is None:
return None
else:
return j_path.toString()
def set_restart_strategy(self, restart_strategy_configuration: RestartStrategyConfiguration):
"""
Sets the restart strategy configuration. The configuration specifies which restart strategy
will be used for the execution graph in case of a restart.
Example:
::
>>> env.set_restart_strategy(RestartStrategies.no_restart())
:param restart_strategy_configuration: Restart strategy configuration to be set.
:return:
"""
self._j_stream_execution_environment.setRestartStrategy(
restart_strategy_configuration._j_restart_strategy_configuration)
def get_restart_strategy(self) -> RestartStrategyConfiguration:
"""
Returns the specified restart strategy configuration.
:return: The restart strategy configuration to be used.
"""
return RestartStrategies._from_j_restart_strategy(
self._j_stream_execution_environment.getRestartStrategy())
def add_default_kryo_serializer(self, type_class_name: str, serializer_class_name: str):
"""
Adds a new Kryo default serializer to the Runtime.
Example:
::
>>> env.add_default_kryo_serializer("com.aaa.bbb.TypeClass", "com.aaa.bbb.Serializer")
:param type_class_name: The full-qualified java class name of the types serialized with the
given serializer.
:param serializer_class_name: The full-qualified java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_stream_execution_environment.addDefaultKryoSerializer(type_clz, j_serializer_clz)
def register_type_with_kryo_serializer(self, type_class_name: str, serializer_class_name: str):
"""
Registers the given Serializer via its class as a serializer for the given type at the
KryoSerializer.
Example:
::
>>> env.register_type_with_kryo_serializer("com.aaa.bbb.TypeClass",
... "com.aaa.bbb.Serializer")
:param type_class_name: The full-qualified java class name of the types serialized with
the given serializer.
:param serializer_class_name: The full-qualified java class name of the serializer to use.
"""
type_clz = load_java_class(type_class_name)
j_serializer_clz = load_java_class(serializer_class_name)
self._j_stream_execution_environment.registerTypeWithKryoSerializer(
type_clz, j_serializer_clz)
def register_type(self, type_class_name: str):
"""
Registers the given type with the serialization stack. If the type is eventually
serialized as a POJO, then the type is registered with the POJO serializer. If the
type ends up being serialized with Kryo, then it will be registered at Kryo to make
sure that only tags are written.
Example:
::
>>> env.register_type("com.aaa.bbb.TypeClass")
:param type_class_name: The full-qualified java class name of the type to register.
"""
type_clz = load_java_class(type_class_name)
self._j_stream_execution_environment.registerType(type_clz)
def set_stream_time_characteristic(self, characteristic: TimeCharacteristic):
"""
Sets the time characteristic for all streams create from this environment, e.g., processing
time, event time, or ingestion time.
If you set the characteristic to IngestionTime or EventTime this will set a default
watermark update interval of 200 ms. If this is not applicable for your application
you should change it using
:func:`pyflink.common.ExecutionConfig.set_auto_watermark_interval`.
Example:
::
>>> env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
:param characteristic: The time characteristic, which could be
:data:`TimeCharacteristic.ProcessingTime`,
:data:`TimeCharacteristic.IngestionTime`,
:data:`TimeCharacteristic.EventTime`.
"""
j_characteristic = TimeCharacteristic._to_j_time_characteristic(characteristic)
self._j_stream_execution_environment.setStreamTimeCharacteristic(j_characteristic)
def get_stream_time_characteristic(self) -> 'TimeCharacteristic':
"""
Gets the time characteristic.
.. seealso:: :func:`set_stream_time_characteristic`
:return: The :class:`TimeCharacteristic`.
"""
j_characteristic = self._j_stream_execution_environment.getStreamTimeCharacteristic()
return TimeCharacteristic._from_j_time_characteristic(j_characteristic)
def configure(self, configuration: Configuration):
"""
Sets all relevant options contained in the :class:`~pyflink.common.Configuration`, such as
e.g. `pipeline.time-characteristic`. It will reconfigure
:class:`~pyflink.datastream.StreamExecutionEnvironment`,
:class:`~pyflink.common.ExecutionConfig` and :class:`~pyflink.datastream.CheckpointConfig`.
It will change the value of a setting only if a corresponding option was set in the
`configuration`. If a key is not present, the current value of a field will remain
untouched.
:param configuration: a configuration to read the values from.
.. versionadded:: 1.15.0
"""
self._j_stream_execution_environment.configure(configuration._j_configuration,
get_gateway().jvm.Thread.currentThread()
.getContextClassLoader())
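# Illustrative sketch of configure(); the keys are standard Flink options, but the
# concrete values below are assumptions chosen only for the example:
#
#   from pyflink.common import Configuration
#   config = Configuration()
#   config.set_string("pipeline.name", "my-pipeline")
#   config.set_integer("parallelism.default", 4)
#   env.configure(config)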
def add_python_file(self, file_path: str):
"""
Adds a python dependency which could be python files, python packages or
local directories. They will be added to the PYTHONPATH of the python UDF worker.
Please make sure that these dependencies can be imported.
:param file_path: The path of the python dependency.
"""
jvm = get_gateway().jvm
env_config = jvm.org.apache.flink.python.util.PythonConfigUtil\
.getEnvironmentConfig(self._j_stream_execution_environment)
python_files = env_config.getString(jvm.PythonOptions.PYTHON_FILES.key(), None)
if python_files is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join([file_path, python_files])
else:
python_files = file_path
env_config.setString(jvm.PythonOptions.PYTHON_FILES.key(), python_files)
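# Hedged usage sketch for add_python_file(); the paths and module names are hypothetical
# and only illustrate that the dependency becomes importable inside Python UDFs:
#
#   env.add_python_file("/path/to/my_util.py")   # a single module
#   env.add_python_file("/path/to/my_package")   # a local package directory
#   # afterwards `import my_util` / `import my_package` works in the UDF worker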
def set_python_requirements(self, requirements_file_path: str,
requirements_cache_dir: str = None):
"""
Specifies a requirements.txt file which defines the third-party dependencies.
These dependencies will be installed to a temporary directory and added to the
PYTHONPATH of the python UDF worker.
For the dependencies which could not be accessed in the cluster, a directory which contains
the installation packages of these dependencies can be specified using the parameter
"requirements_cache_dir". It will be uploaded to the cluster to support offline
installation.
Example:
::
# commands executed in shell
$ echo numpy==1.16.5 > requirements.txt
$ pip download -d cached_dir -r requirements.txt --no-binary :all:
# python code
>>> stream_env.set_python_requirements("requirements.txt", "cached_dir")
.. note::
Please make sure the installation packages match the platform of the cluster
and the python version used. These packages will be installed using pip,
so please also make sure that pip (version >= 20.3) and
SetupTools (version >= 37.0.0) are available.
:param requirements_file_path: The path of "requirements.txt" file.
:param requirements_cache_dir: The path of the local directory which contains the
installation packages.
"""
jvm = get_gateway().jvm
python_requirements = requirements_file_path
if requirements_cache_dir is not None:
python_requirements = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[python_requirements, requirements_cache_dir])
env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
.getEnvironmentConfig(self._j_stream_execution_environment)
env_config.setString(jvm.PythonOptions.PYTHON_REQUIREMENTS.key(), python_requirements)
def add_python_archive(self, archive_path: str, target_dir: str = None):
"""
Adds a python archive file. The file will be extracted to the working directory of
python UDF worker.
If the parameter "target_dir" is specified, the archive file will be extracted to a
directory named ${target_dir}. Otherwise, the archive file will be extracted to a
directory with the same name as the archive file.
If python UDF depends on a specific python version which does not exist in the cluster,
this method can be used to upload the virtual environment.
Note that the path of the python interpreter contained in the uploaded environment
should be specified via the method :func:`pyflink.table.TableConfig.set_python_executable`.
The files uploaded via this method are also accessible in UDFs via relative path.
Example:
::
# command executed in shell
# assert the relative path of python interpreter is py_env/bin/python
$ zip -r py_env.zip py_env
# python code
>>> stream_env.add_python_archive("py_env.zip")
>>> stream_env.set_python_executable("py_env.zip/py_env/bin/python")
# or
>>> stream_env.add_python_archive("py_env.zip", "myenv")
>>> stream_env.set_python_executable("myenv/py_env/bin/python")
# the files contained in the archive file can be accessed in UDF
>>> def my_udf():
... with open("myenv/py_env/data/data.txt") as f:
... ...
.. note::
Please make sure the uploaded python environment matches the platform that the cluster
is running on and that the python version is 3.6 or higher.
.. note::
Currently only zip-format is supported, i.e. zip, jar, whl, egg, etc.
Other archive formats such as tar, tar.gz, 7z, rar, etc. are not supported.
:param archive_path: The archive file path.
:param target_dir: Optional, the target dir name that the archive file extracted to.
"""
jvm = get_gateway().jvm
if target_dir is not None:
archive_path = jvm.PythonDependencyUtils.PARAM_DELIMITER.join(
[archive_path, target_dir])
env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
.getEnvironmentConfig(self._j_stream_execution_environment)
python_archives = env_config.getString(jvm.PythonOptions.PYTHON_ARCHIVES.key(), None)
if python_archives is not None:
python_files = jvm.PythonDependencyUtils.FILE_DELIMITER.join(
[python_archives, archive_path])
else:
python_files = archive_path
env_config.setString(jvm.PythonOptions.PYTHON_ARCHIVES.key(), python_files)
def set_python_executable(self, python_exec: str):
"""
Sets the path of the python interpreter which is used to execute the python udf workers.
e.g. "/usr/local/bin/python3".
If python UDF depends on a specific python version which does not exist in the cluster,
the method :func:`pyflink.datastream.StreamExecutionEnvironment.add_python_archive` can be
used to upload a virtual environment. The path of the python interpreter contained in the
uploaded environment can be specified via this method.
Example:
::
# command executed in shell
# assume that the relative path of python interpreter is py_env/bin/python
$ zip -r py_env.zip py_env
# python code
>>> stream_env.add_python_archive("py_env.zip")
>>> stream_env.set_python_executable("py_env.zip/py_env/bin/python")
.. note::
Please make sure the uploaded python environment matches the platform that the cluster
is running on and that the python version is 3.7 or higher.
.. note::
The python udf worker depends on Apache Beam (version == 2.43.0).
Please ensure that the specified environment meets the above requirements.
:param python_exec: The path of python interpreter.
"""
jvm = get_gateway().jvm
env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
.getEnvironmentConfig(self._j_stream_execution_environment)
env_config.setString(jvm.PythonOptions.PYTHON_EXECUTABLE.key(), python_exec)
def add_jars(self, *jars_path: str):
"""
Adds a list of jar files that will be uploaded to the cluster and referenced by the job.
:param jars_path: Path of jars.
"""
add_jars_to_context_class_loader(jars_path)
jvm = get_gateway().jvm
jars_key = jvm.org.apache.flink.configuration.PipelineOptions.JARS.key()
env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
.getEnvironmentConfig(self._j_stream_execution_environment)
old_jar_paths = env_config.getString(jars_key, None)
joined_jars_path = ';'.join(jars_path)
if old_jar_paths and old_jar_paths.strip():
joined_jars_path = ';'.join([old_jar_paths, joined_jars_path])
env_config.setString(jars_key, joined_jars_path)
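# Illustrative sketch only; the jar locations are hypothetical and passed as file:// URIs
# so that they can also be resolved when the job is submitted to a cluster:
#
#   env.add_jars("file:///path/to/connector.jar", "file:///path/to/my-udfs.jar")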
def add_classpaths(self, *classpaths: str):
"""
Adds a list of URLs that are added to the classpath of each user code classloader of the
program. Paths must specify a protocol (e.g. file://) and be accessible on all nodes.
:param classpaths: Classpaths that will be added.
"""
add_jars_to_context_class_loader(classpaths)
jvm = get_gateway().jvm
classpaths_key = jvm.org.apache.flink.configuration.PipelineOptions.CLASSPATHS.key()
env_config = jvm.org.apache.flink.python.util.PythonConfigUtil \
.getEnvironmentConfig(self._j_stream_execution_environment)
old_classpaths = env_config.getString(classpaths_key, None)
joined_classpaths = ';'.join(list(classpaths))
if old_classpaths and old_classpaths.strip():
joined_classpaths = ';'.join([old_classpaths, joined_classpaths])
env_config.setString(classpaths_key, joined_classpaths)
def get_default_local_parallelism(self) -> int:
"""
Gets the default parallelism that will be used for the local execution environment.
:return: The default local parallelism.
"""
return self._j_stream_execution_environment.getDefaultLocalParallelism()
def set_default_local_parallelism(self, parallelism: int):
"""
Sets the default parallelism that will be used for the local execution environment.
:param parallelism: The parallelism to use as the default local parallelism.
"""
self._j_stream_execution_environment.setDefaultLocalParallelism(parallelism)
def execute(self, job_name: str = None) -> JobExecutionResult:
"""
Triggers the program execution. The environment will execute all parts of
the program that have resulted in a "sink" operation. Sink operations are
for example printing results or forwarding them to a message queue.
The program execution will be logged and displayed with the provided name.
:param job_name: Desired name of the job, optional.
:return: The result of the job execution, containing elapsed time and accumulators.
"""
j_stream_graph = self._generate_stream_graph(clear_transformations=True, job_name=job_name)
return JobExecutionResult(self._j_stream_execution_environment.execute(j_stream_graph))
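# A minimal end-to-end sketch of execute(); the elements and job name are arbitrary
# examples, and print() provides the required "sink" operation:
#
#   env = StreamExecutionEnvironment.get_execution_environment()
#   env.from_collection([1, 2, 3]).map(lambda x: x * 2).print()
#   result = env.execute("doubling-job")   # JobExecutionResult with elapsed time/accumulators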
def execute_async(self, job_name: str = 'Flink Streaming Job') -> JobClient:
"""
Triggers the program asynchronously. The environment will execute all parts of the program
that have resulted in a "sink" operation. Sink operations are for example printing results
or forwarding them to a message queue.
The program execution will be logged and displayed with a generated default name.
:param job_name: Desired name of the job.
:return: A JobClient that can be used to communicate with the submitted job, completed on
submission succeeded.
"""
j_stream_graph = self._generate_stream_graph(clear_transformations=True, job_name=job_name)
j_job_client = self._j_stream_execution_environment.executeAsync(j_stream_graph)
return JobClient(j_job_client=j_job_client)
def get_execution_plan(self) -> str:
"""
Creates the plan with which the system will execute the program, and returns it as
a String using a JSON representation of the execution data flow graph.
Note that this needs to be called before the plan is executed.
If the compiler could not be instantiated, or the master could not
be contacted to retrieve information relevant to the execution planning,
an exception will be thrown.
:return: The execution plan of the program, as a JSON String.
"""
j_stream_graph = self._generate_stream_graph(False)
return j_stream_graph.getStreamingPlanAsJSON()
def register_cached_file(self, file_path: str, name: str, executable: bool = False):
"""
Registers a file at the distributed cache under the given name. The file will be accessible
from any user-defined function in the (distributed) runtime under a local path. Files may be
local files (which will be distributed via BlobServer), or files in a distributed file
system. The runtime will copy the files temporarily to a local cache, if needed.
:param file_path: The path of the file, as a URI (e.g. "file:///some/path" or
"hdfs://host:port/and/path").
:param name: The name under which the file is registered.
:param executable: Flag indicating whether the file should be executable.
.. versionadded:: 1.16.0
"""
self._j_stream_execution_environment.registerCachedFile(file_path, name, executable)
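# Hedged sketch of register_cached_file(); the URI and the cache name are assumptions.
# Inside a user-defined function the file is then available under the registered name
# through the distributed cache of the runtime:
#
#   env.register_cached_file("hdfs://namenode:9000/data/lookup.csv", "lookup_file")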
@staticmethod
def get_execution_environment(configuration: Configuration = None) \
-> 'StreamExecutionEnvironment':
"""
Creates an execution environment that represents the context in which the
program is currently executed. If the program is invoked standalone, this
method returns a local execution environment.
When executed from the command line the given configuration is stacked on top of the
global configuration which comes from the flink-conf.yaml, potentially overriding
duplicated options.
:param configuration: The configuration to instantiate the environment with.
:return: The execution environment of the context in which the program is executed.
"""
gateway = get_gateway()
JStreamExecutionEnvironment = gateway.jvm.org.apache.flink.streaming.api.environment \
.StreamExecutionEnvironment
if configuration:
j_stream_exection_environment = JStreamExecutionEnvironment.getExecutionEnvironment(
configuration._j_configuration)
else:
j_stream_exection_environment = JStreamExecutionEnvironment.getExecutionEnvironment()
return StreamExecutionEnvironment(j_stream_exection_environment)
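# Typical entry-point sketch; passing a Configuration is optional and the option shown
# here is just an example value:
#
#   from pyflink.common import Configuration
#   config = Configuration()
#   config.set_string("taskmanager.memory.process.size", "2g")
#   env = StreamExecutionEnvironment.get_execution_environment(config)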
def create_input(self, input_format: InputFormat,
type_info: Optional[TypeInformation] = None):
"""
Create an input data stream with InputFormat.
If the input_format needs well-defined type information (e.g. Avro's generic record), you
can either explicitly use the type_info argument or use InputFormats implementing
ResultTypeQueryable.
:param input_format: The input format to read from.
:param type_info: Optional type information to explicitly declare output type.
.. versionadded:: 1.16.0
"""
input_type_info = type_info
if input_type_info is None and isinstance(input_format, ResultTypeQueryable):
input_type_info = cast(ResultTypeQueryable, input_format).get_produced_type()
if input_type_info is None:
j_data_stream = self._j_stream_execution_environment.createInput(
input_format.get_java_object()
)
else:
j_data_stream = self._j_stream_execution_environment.createInput(
input_format.get_java_object(), input_type_info.get_java_type_info()
)
return DataStream(j_data_stream=j_data_stream)
def add_source(self, source_func: SourceFunction, source_name: str = 'Custom Source',
type_info: TypeInformation = None) -> 'DataStream':
"""
Adds a data source to the streaming topology.
:param source_func: the user defined function.
:param source_name: name of the data source. Optional.
:param type_info: type of the returned stream. Optional.
:return: the data stream constructed.
"""
if type_info:
j_type_info = type_info.get_java_type_info()
else:
j_type_info = None
j_data_stream = self._j_stream_execution_environment.addSource(source_func
.get_java_function(),
source_name,
j_type_info)
return DataStream(j_data_stream=j_data_stream)
def from_source(self,
source: Source,
watermark_strategy: WatermarkStrategy,
source_name: str,
type_info: TypeInformation = None) -> 'DataStream':
"""
Adds a data :class:`~pyflink.datastream.connectors.Source` to the environment to get a
:class:`~pyflink.datastream.DataStream`.
The result will be either a bounded data stream (that can be processed in a batch way) or
an unbounded data stream (that must be processed in a streaming way), based on the
boundedness property of the source.
This method takes an explicit type information for the produced data stream, so that
callers can define directly what type/serializer will be used for the produced stream. For
sources that describe their produced type, the parameter type_info should not be specified
to avoid specifying the produced type redundantly.
.. versionadded:: 1.13.0
"""
if type_info:
j_type_info = type_info.get_java_type_info()
else:
j_type_info = None
j_data_stream = self._j_stream_execution_environment.fromSource(
source.get_java_function(),
watermark_strategy._j_watermark_strategy,
source_name,
j_type_info)
return DataStream(j_data_stream=j_data_stream)
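# Hedged sketch of from_source() with a bounded source; NumberSequenceSource and its
# import path are assumptions that may differ between Flink versions:
#
#   from pyflink.common.watermark_strategy import WatermarkStrategy
#   from pyflink.datastream.connectors.number_seq import NumberSequenceSource
#   ds = env.from_source(NumberSequenceSource(1, 100),
#                        WatermarkStrategy.for_monotonous_timestamps(),
#                        "sequence-source")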
def read_text_file(self, file_path: str, charset_name: str = "UTF-8") -> DataStream:
"""
Reads the given file line-by-line and creates a DataStream that contains a string with the
contents of each such line. The charset with the given name will be used to read the files.
Note that this interface is not fault tolerant and is only supposed to be used for test purposes.
:param file_path: The path of the file, as a URI (e.g., "file:///some/local/file" or
"hdfs://host:port/file/path")
:param charset_name: The name of the character set used to read the file.
:return: The DataStream that represents the data read from the given file as text lines.
"""
return DataStream(self._j_stream_execution_environment
.readTextFile(file_path, charset_name))
def from_collection(self, collection: List[Any],
type_info: TypeInformation = None) -> DataStream:
"""
Creates a data stream from the given non-empty collection. The type of the data stream is
that of the elements in the collection.
Note that this operation will result in a non-parallel data stream source, i.e. a data
stream source with parallelism one.
:param collection: The collection of elements to create the data stream from.
:param type_info: The TypeInformation for the produced data stream
:return: the data stream representing the given collection.
"""
if type_info is not None:
collection = [type_info.to_internal_type(element) for element in collection]
return self._from_collection(collection, type_info)
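# Usage sketch for from_collection(); the elements and the type information are example
# values only:
#
#   from pyflink.common.typeinfo import Types
#   ds = env.from_collection(
#       collection=[(1, 'aaa'), (2, 'bbb')],
#       type_info=Types.ROW([Types.INT(), Types.STRING()]))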
def _from_collection(self, elements: List[Any],
type_info: TypeInformation = None) -> DataStream:
temp_file = tempfile.NamedTemporaryFile(delete=False, dir=tempfile.mkdtemp())
serializer = self.serializer
try:
with temp_file:
# dumps elements to a temporary file by pickle serializer.
serializer.serialize(elements, temp_file)
gateway = get_gateway()
# if the user has not defined the element data types, read the pickled data as a byte array
# list.
if type_info is None:
j_objs = gateway.jvm.PythonBridgeUtils.readPickledBytes(temp_file.name)
out_put_type_info = Types.PICKLED_BYTE_ARRAY() # type: TypeInformation
else:
j_objs = gateway.jvm.PythonBridgeUtils.readPythonObjects(temp_file.name)
out_put_type_info = type_info
PythonTypeUtils = gateway.jvm\
.org.apache.flink.streaming.api.utils.PythonTypeUtils
execution_config = self._j_stream_execution_environment.getConfig()
j_input_format = PythonTypeUtils.getCollectionInputFormat(
j_objs,
out_put_type_info.get_java_type_info(),
execution_config
)
JInputFormatSourceFunction = gateway.jvm.org.apache.flink.streaming.api.functions.\
source.InputFormatSourceFunction
JBoundedness = gateway.jvm.org.apache.flink.api.connector.source.Boundedness
j_data_stream_source = invoke_method(
self._j_stream_execution_environment,
"org.apache.flink.streaming.api.environment.StreamExecutionEnvironment",
"addSource",
[JInputFormatSourceFunction(j_input_format, out_put_type_info.get_java_type_info()),
"Collection Source",
out_put_type_info.get_java_type_info(),
JBoundedness.BOUNDED],
["org.apache.flink.streaming.api.functions.source.SourceFunction",
"java.lang.String",
"org.apache.flink.api.common.typeinfo.TypeInformation",
"org.apache.flink.api.connector.source.Boundedness"])
j_data_stream_source.forceNonParallel()
return DataStream(j_data_stream=j_data_stream_source)
finally:
os.unlink(temp_file.name)
def _generate_stream_graph(self, clear_transformations: bool = False, job_name: str = None) \
-> JavaObject:
gateway = get_gateway()
JPythonConfigUtil = gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(self._j_stream_execution_environment)
gateway.jvm.org.apache.flink.python.chain.PythonOperatorChainingOptimizer.apply(
self._j_stream_execution_environment)
JPythonConfigUtil.setPartitionCustomOperatorNumPartitions(
get_field_value(self._j_stream_execution_environment, "transformations"))
j_stream_graph = self._j_stream_execution_environment.getStreamGraph(clear_transformations)
if job_name is not None:
j_stream_graph.setJobName(job_name)
return j_stream_graph
def _open(self):
# start BeamFnLoopbackWorkerPoolServicer when executed in MiniCluster
j_configuration = get_j_env_configuration(self._j_stream_execution_environment)
def startup_loopback_server():
from pyflink.common import Configuration
from pyflink.fn_execution.beam.beam_worker_pool_service import \
BeamFnLoopbackWorkerPoolServicer
config = Configuration(j_configuration=j_configuration)
config.set_string(
"python.loopback-server.address", BeamFnLoopbackWorkerPoolServicer().start())
python_worker_execution_mode = os.environ.get('_python_worker_execution_mode')
if python_worker_execution_mode is None:
if is_local_deployment(j_configuration):
startup_loopback_server()
elif python_worker_execution_mode == 'loopback':
if is_local_deployment(j_configuration):
startup_loopback_server()
else:
raise ValueError("Loopback mode is enabled, however the job wasn't configured to "
"run in local deployment mode")
elif python_worker_execution_mode != 'process':
raise ValueError(
"It only supports to execute the Python worker in 'loopback' mode and 'process' "
"mode, unknown mode '%s' is configured" % python_worker_execution_mode)
def is_unaligned_checkpoints_enabled(self):
"""
Returns whether Unaligned Checkpoints are enabled.
"""
return self._j_stream_execution_environment.isUnalignedCheckpointsEnabled()
def is_force_unaligned_checkpoints(self):
"""
Returns whether Unaligned Checkpoints are force-enabled.
"""
return self._j_stream_execution_environment.isForceUnalignedCheckpoints()
def close(self):
"""
Close and clean up the execution environment. All the cached intermediate results will be
released physically.
.. versionadded:: 1.16.0
"""
self._j_stream_execution_environment.close()
| 49,739 | 45.312849 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/state.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from enum import Enum
from typing import TypeVar, Generic, Iterable, List, Iterator, Dict, Tuple, Optional
from pyflink.common.time import Time
from pyflink.common.typeinfo import TypeInformation, Types
__all__ = [
'ValueStateDescriptor',
'ValueState',
'ListStateDescriptor',
'ListState',
'MapStateDescriptor',
'MapState',
'ReducingStateDescriptor',
'ReducingState',
'AggregatingStateDescriptor',
'AggregatingState',
'ReadOnlyBroadcastState',
'BroadcastState',
'StateTtlConfig',
'OperatorStateStore',
]
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
IN = TypeVar('IN')
OUT = TypeVar('OUT')
class OperatorStateStore(ABC):
"""
Interface for getting operator states. Currently, only :class:`~state.BroadcastState` is
supported.
.. versionadded:: 1.16.0
"""
@abstractmethod
def get_broadcast_state(self, state_descriptor: 'MapStateDescriptor') -> 'BroadcastState':
"""
Fetches the :class:`~state.BroadcastState` described by :class:`~state.MapStateDescriptor`,
which has read/write access to the broadcast operator state.
"""
pass
class State(ABC):
"""
Interface that different types of partitioned state must implement.
"""
@abstractmethod
def clear(self) -> None:
"""
Removes the value mapped under the current key.
"""
pass
class ValueState(State, Generic[T]):
"""
:class:`State` interface for partitioned single-value state. The value can be retrieved or
updated.
The state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
"""
@abstractmethod
def value(self) -> T:
"""
Returns the current value for the state. When the state is not partitioned the returned
value is the same for all inputs in a given operator instance. If state partitioning is
applied, the value returned depends on the current operator input, as the operator
maintains an independent state for each partition.
"""
pass
@abstractmethod
def update(self, value: T) -> None:
"""
Updates the operator state accessible by :func:`value` to the given value. The next time
:func:`value` is called (for the same state partition) the returned state will represent
the updated value. When a partitioned state is updated with None, the state for the current
key will be removed and the default value is returned on the next access.
"""
pass
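# Hedged sketch of how a ValueState is typically obtained and used from user code; the
# class and field names below are illustrative only:
#
#   from pyflink.common.typeinfo import Types
#   from pyflink.datastream.functions import KeyedProcessFunction
#   from pyflink.datastream.state import ValueStateDescriptor
#
#   class CountFunction(KeyedProcessFunction):
#       def open(self, runtime_context):
#           self.count_state = runtime_context.get_state(
#               ValueStateDescriptor("count", Types.LONG()))
#
#       def process_element(self, value, ctx):
#           current = self.count_state.value() or 0
#           self.count_state.update(current + 1)
#           yield value, current + 1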
class AppendingState(State, Generic[IN, OUT]):
"""
Base interface for partitioned state that supports adding elements and inspecting the current
state. Elements can either be kept in a buffer (list-like) or aggregated into one value.
This state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
The state is only accessible by functions applied on a KeyedStream. The key is automatically
supplied by the system, so the function always sees the value mapped to the key of the current
element. That way, the system can handle stream and state partitioning consistently together.
"""
@abstractmethod
def get(self) -> OUT:
"""
Returns the elements under the current key.
"""
pass
@abstractmethod
def add(self, value: IN) -> None:
"""
Adds the given value to the state.
"""
pass
class MergingState(AppendingState[IN, OUT]):
"""
Extension of AppendingState that allows merging of state. That is, two instances of MergingState
can be combined into a single instance that contains all the information of the two merged
states.
"""
pass
class ReducingState(MergingState[T, T]):
"""
:class:`State` interface for reducing state. Elements can be added to the state; they will be
combined using a reduce function. The current state can be inspected.
The state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
The state is only accessible by functions applied on a KeyedStream. The key is automatically
supplied by the system, so the function always sees the value mapped to the key of the current
element. That way, the system can handle stream and state partitioning consistently together.
"""
pass
class AggregatingState(MergingState[IN, OUT]):
"""
:class:`State` interface for aggregating state, based on an
:class:`~pyflink.datastream.functions.AggregateFunction`. Elements that are added to this type
of state will be eagerly pre-aggregated using a given AggregateFunction.
Internally, the state always holds the accumulator type of the AggregateFunction. When
accessing the result of the state, the accumulator is converted to the result type with the
function's :func:`~pyflink.datastream.functions.AggregateFunction.get_result` method.
The state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
The state is only accessible by functions applied on a KeyedStream. The key is automatically
supplied by the system, so the function always sees the value mapped to the key of the current
element. That way, the system can handle stream and state partitioning consistently together.
"""
pass
class ListState(MergingState[T, Iterable[T]]):
"""
:class:`State` interface for partitioned list state in Operations.
The state is accessed and modified by user functions, and checkpointed consistently
by the system as part of the distributed snapshots.
Currently only keyed list state is supported.
When it is a keyed list state, the state key is automatically supplied by the system, so the
user function always sees the value mapped to the key of the current element. That way, the
system can handle stream and state partitioning consistently together.
"""
@abstractmethod
def update(self, values: List[T]) -> None:
"""
Updates the existing values to the given list of values.
"""
pass
@abstractmethod
def add_all(self, values: List[T]) -> None:
"""
Adds the given values to the tail of this list state.
"""
pass
def __iter__(self) -> Iterator[T]:
return iter(self.get())
class MapState(State, Generic[K, V]):
"""
:class:`State` interface for partitioned key-value state. The key-value pair can be added,
updated and retrieved.
The state is accessed and modified by user functions, and checkpointed consistently by the
system as part of the distributed snapshots.
The state key is automatically supplied by the system, so the function always sees the value
mapped to the key of the current element. That way, the system can handle stream and state
partitioning consistently together.
"""
@abstractmethod
def get(self, key: K) -> V:
"""
Returns the current value associated with the given key.
"""
pass
@abstractmethod
def put(self, key: K, value: V) -> None:
"""
Associates a new value with the given key.
"""
pass
@abstractmethod
def put_all(self, dict_value: Dict[K, V]) -> None:
"""
Copies all of the mappings from the given map into the state.
"""
pass
@abstractmethod
def remove(self, key: K) -> None:
"""
Deletes the mapping of the given key.
"""
pass
@abstractmethod
def contains(self, key: K) -> bool:
"""
Returns whether there exists the given mapping.
"""
pass
@abstractmethod
def items(self) -> Iterable[Tuple[K, V]]:
"""
Returns all the mappings in the state.
"""
pass
@abstractmethod
def keys(self) -> Iterable[K]:
"""
Returns all the keys in the state.
"""
pass
@abstractmethod
def values(self) -> Iterable[V]:
"""
Returns all the values in the state.
"""
pass
@abstractmethod
def is_empty(self) -> bool:
"""
Returns true if this state contains no key-value mappings, otherwise false.
"""
pass
def __getitem__(self, key: K) -> V:
return self.get(key)
def __setitem__(self, key: K, value: V) -> None:
self.put(key, value)
def __delitem__(self, key: K) -> None:
self.remove(key)
def __contains__(self, key: K) -> bool:
return self.contains(key)
def __iter__(self) -> Iterator[K]:
return iter(self.keys())
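# Illustrative MapState usage from within a keyed function; the descriptor name and keys
# are example values, and `runtime_context` is assumed to come from open():
#
#   counts = runtime_context.get_map_state(
#       MapStateDescriptor("word-counts", Types.STRING(), Types.LONG()))
#   counts.put("flink", (counts.get("flink") or 0) + 1)   # explicit API
#   counts["pyflink"] = 1                                  # dict-style __setitem__
#   if "pyflink" in counts:                                # __contains__
#       del counts["pyflink"]                              # __delitem__ -> remove()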
class ReadOnlyBroadcastState(State, Generic[K, V]):
"""
A read-only view of the :class:`BroadcastState`.
Although read-only, the user code should not modify the value returned by :meth:`get` or the
items returned by :meth:`items`, as this can lead to inconsistent states. The reason for this is
that we do not create extra copies of the elements for performance reasons.
"""
@abstractmethod
def get(self, key: K) -> V:
"""
Returns the current value associated with the given key.
"""
pass
@abstractmethod
def contains(self, key: K) -> bool:
"""
Returns whether there exists the given mapping.
"""
pass
@abstractmethod
def items(self) -> Iterable[Tuple[K, V]]:
"""
Returns all the mappings in the state.
"""
pass
@abstractmethod
def keys(self) -> Iterable[K]:
"""
Returns all the keys in the state.
"""
pass
@abstractmethod
def values(self) -> Iterable[V]:
"""
Returns all the values in the state.
"""
pass
@abstractmethod
def is_empty(self) -> bool:
"""
Returns true if this state contains no key-value mappings, otherwise false.
"""
pass
def __getitem__(self, key: K) -> V:
return self.get(key)
def __contains__(self, key: K) -> bool:
return self.contains(key)
def __iter__(self) -> Iterator[K]:
return iter(self.keys())
class BroadcastState(ReadOnlyBroadcastState[K, V]):
"""
A type of state that can be created to store the state of a :class:`BroadcastStream`. This state
assumes that the same elements are sent to all instances of an operator.
CAUTION: the user has to guarantee that all task instances store the same elements in
this type of state.
Each operator instance individually maintains and stores elements in the broadcast state. The
fact that the incoming stream is a broadcast one guarantees that all instances see all the
elements. Upon recovery or re-scaling, the same state is given to each of the instances.
To avoid hotspots, each task reads its previous partition, and if there are more tasks (scale
up), then the new instances read from the old instances in a round-robin fashion. This is why each
instance has to guarantee that it stores the same elements as the rest. If not, upon recovery or
rescaling you may have unpredictable redistribution of the partitions, thus unpredictable
results.
"""
@abstractmethod
def put(self, key: K, value: V) -> None:
"""
Associates a new value with the given key.
"""
pass
@abstractmethod
def put_all(self, dict_value: Dict[K, V]) -> None:
"""
Copies all of the mappings from the given map into the state.
"""
pass
@abstractmethod
def remove(self, key: K) -> None:
"""
Deletes the mapping of the given key.
"""
pass
def __setitem__(self, key: K, value: V) -> None:
self.put(key, value)
def __delitem__(self, key: K) -> None:
self.remove(key)
class StateDescriptor(ABC):
"""
Base class for state descriptors. A StateDescriptor is used for creating partitioned State in
stateful operations.
"""
def __init__(self, name: str, type_info: TypeInformation):
"""
Constructor for StateDescriptor.
:param name: The name of the state
:param type_info: The type information of the value.
"""
self.name = name
self.type_info = type_info
self._ttl_config = None # type: Optional[StateTtlConfig]
def get_name(self) -> str:
"""
Get the name of the state.
:return: The name of the state.
"""
return self.name
def enable_time_to_live(self, ttl_config: 'StateTtlConfig'):
"""
Configures optional activation of state time-to-live (TTL).
State user values will expire, become unavailable and be cleaned up in storage depending on the
configured StateTtlConfig.
:param ttl_config: Configuration of state TTL
"""
self._ttl_config = ttl_config
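# Hedged sketch of attaching a TTL configuration to a state descriptor; the one-hour TTL
# and the descriptor name are example choices (StateTtlConfig is defined below in this
# module):
#
#   from pyflink.common.time import Time
#   ttl_config = StateTtlConfig.new_builder(Time.seconds(3600)) \
#       .set_update_type(StateTtlConfig.UpdateType.OnCreateAndWrite) \
#       .set_state_visibility(StateTtlConfig.StateVisibility.NeverReturnExpired) \
#       .build()
#   descriptor = ValueStateDescriptor("last-login", Types.LONG())
#   descriptor.enable_time_to_live(ttl_config)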
class ValueStateDescriptor(StateDescriptor):
"""
StateDescriptor for ValueState. This can be used to create partitioned value state using
RuntimeContext.get_state(ValueStateDescriptor).
"""
def __init__(self, name: str, value_type_info: TypeInformation):
"""
Constructor of the ValueStateDescriptor.
:param name: The name of the state.
:param value_type_info: the type information of the state.
"""
super(ValueStateDescriptor, self).__init__(name, value_type_info)
class ListStateDescriptor(StateDescriptor):
"""
StateDescriptor for ListState. This can be used to create state where the type is a list that
can be appended and iterated over.
"""
def __init__(self, name: str, elem_type_info: TypeInformation):
"""
Constructor of the ListStateDescriptor.
:param name: The name of the state.
:param elem_type_info: the type information of the state element.
"""
super(ListStateDescriptor, self).__init__(name, Types.LIST(elem_type_info))
class MapStateDescriptor(StateDescriptor):
"""
StateDescriptor for MapState. This can be used to create state where the type is a map that can
be updated and iterated over.
"""
def __init__(self, name: str, key_type_info: TypeInformation, value_type_info: TypeInformation):
"""
Constructor of the MapStateDescriptor.
:param name: The name of the state.
:param key_type_info: The type information of the key.
:param value_type_info: the type information of the value.
"""
super(MapStateDescriptor, self).__init__(name, Types.MAP(key_type_info, value_type_info))
class ReducingStateDescriptor(StateDescriptor):
"""
StateDescriptor for ReducingState. This can be used to create partitioned reducing state using
RuntimeContext.get_reducing_state(ReducingStateDescriptor).
"""
def __init__(self,
name: str,
reduce_function,
type_info: TypeInformation):
"""
Constructor of the ReducingStateDescriptor.
:param name: The name of the state.
:param reduce_function: The ReduceFunction used to aggregate the state.
:param type_info: The type of the values in the state.
"""
super(ReducingStateDescriptor, self).__init__(name, type_info)
from pyflink.datastream.functions import ReduceFunction, ReduceFunctionWrapper
if not isinstance(reduce_function, ReduceFunction):
if callable(reduce_function):
reduce_function = ReduceFunctionWrapper(reduce_function) # type: ignore
else:
raise TypeError("The input must be a ReduceFunction or a callable function!")
self._reduce_function = reduce_function
def get_reduce_function(self):
return self._reduce_function
class AggregatingStateDescriptor(StateDescriptor):
"""
A StateDescriptor for AggregatingState.
The type internally stored in the state is the type of the Accumulator of the
:func:`~pyflink.datastream.functions.AggregateFunction`.
"""
def __init__(self,
name: str,
agg_function,
state_type_info):
super(AggregatingStateDescriptor, self).__init__(name, state_type_info)
from pyflink.datastream.functions import AggregateFunction
if not isinstance(agg_function, AggregateFunction):
raise TypeError("The input must be a pyflink.datastream.functions.AggregateFunction!")
self._agg_function = agg_function
def get_agg_function(self):
return self._agg_function
class StateTtlConfig(object):
class UpdateType(Enum):
"""
This option configures when to update the last access timestamp, which prolongs the state TTL.
"""
Disabled = 0
"""
TTL is disabled. State does not expire.
"""
OnCreateAndWrite = 1
"""
Last access timestamp is initialised when state is created and updated on every write
operation.
"""
OnReadAndWrite = 2
"""
The same as OnCreateAndWrite but also updated on read.
"""
def _to_proto(self):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
return getattr(StateDescriptor.StateTTLConfig.UpdateType, self.name)
@staticmethod
def _from_proto(proto):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
update_type_name = StateDescriptor.StateTTLConfig.UpdateType.Name(proto)
return StateTtlConfig.UpdateType[update_type_name]
class StateVisibility(Enum):
"""
This option configures whether expired user value can be returned or not.
"""
ReturnExpiredIfNotCleanedUp = 0
"""
Return expired user value if it is not cleaned up yet.
"""
NeverReturnExpired = 1
"""
Never return expired user value.
"""
def _to_proto(self):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
return getattr(StateDescriptor.StateTTLConfig.StateVisibility, self.name)
@staticmethod
def _from_proto(proto):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
state_visibility_name = StateDescriptor.StateTTLConfig.StateVisibility.Name(proto)
return StateTtlConfig.StateVisibility[state_visibility_name]
class TtlTimeCharacteristic(Enum):
"""
This option configures time scale to use for ttl.
"""
ProcessingTime = 0
"""
Processing time
"""
def _to_proto(self):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
return getattr(StateDescriptor.StateTTLConfig.TtlTimeCharacteristic, self.name)
@staticmethod
def _from_proto(proto):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
ttl_time_characteristic_name = \
StateDescriptor.StateTTLConfig.TtlTimeCharacteristic.Name(proto)
return StateTtlConfig.TtlTimeCharacteristic[ttl_time_characteristic_name]
def __init__(self,
update_type: UpdateType,
state_visibility: StateVisibility,
ttl_time_characteristic: TtlTimeCharacteristic,
ttl: Time,
cleanup_strategies: 'StateTtlConfig.CleanupStrategies'):
self._update_type = update_type
self._state_visibility = state_visibility
self._ttl_time_characteristic = ttl_time_characteristic
self._ttl = ttl
self._cleanup_strategies = cleanup_strategies
@staticmethod
def new_builder(ttl: Time):
return StateTtlConfig.Builder(ttl)
def get_update_type(self) -> 'StateTtlConfig.UpdateType':
return self._update_type
def get_state_visibility(self) -> 'StateTtlConfig.StateVisibility':
return self._state_visibility
def get_ttl(self) -> Time:
return self._ttl
def get_ttl_time_characteristic(self) -> 'StateTtlConfig.TtlTimeCharacteristic':
return self._ttl_time_characteristic
def is_enabled(self) -> bool:
return self._update_type.value != StateTtlConfig.UpdateType.Disabled.value
def get_cleanup_strategies(self) -> 'StateTtlConfig.CleanupStrategies':
return self._cleanup_strategies
def _to_proto(self):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
state_ttl_config = StateDescriptor.StateTTLConfig()
state_ttl_config.update_type = self._update_type._to_proto()
state_ttl_config.state_visibility = self._state_visibility._to_proto()
state_ttl_config.ttl_time_characteristic = self._ttl_time_characteristic._to_proto()
state_ttl_config.ttl = self._ttl.to_milliseconds()
state_ttl_config.cleanup_strategies.CopyFrom(self._cleanup_strategies._to_proto())
return state_ttl_config
@staticmethod
def _from_proto(proto):
update_type = StateTtlConfig.UpdateType._from_proto(proto.update_type)
state_visibility = StateTtlConfig.StateVisibility._from_proto(proto.state_visibility)
ttl_time_characteristic = \
StateTtlConfig.TtlTimeCharacteristic._from_proto(proto.ttl_time_characteristic)
ttl = Time.milliseconds(proto.ttl)
cleanup_strategies = StateTtlConfig.CleanupStrategies._from_proto(proto.cleanup_strategies)
builder = StateTtlConfig.new_builder(ttl) \
.set_update_type(update_type) \
.set_state_visibility(state_visibility) \
.set_ttl_time_characteristic(ttl_time_characteristic)
builder._strategies = cleanup_strategies._strategies
builder._is_cleanup_in_background = cleanup_strategies._is_cleanup_in_background
return builder.build()
def __repr__(self):
return "StateTtlConfig<" \
       "update_type={}, " \
       "state_visibility={}, " \
       "ttl_time_characteristic={}, " \
"ttl={}>".format(self._update_type,
self._state_visibility,
self._ttl_time_characteristic,
self._ttl)
class Builder(object):
"""
Builder for the StateTtlConfig.
"""
def __init__(self, ttl: Time):
self._ttl = ttl
self._update_type = StateTtlConfig.UpdateType.OnCreateAndWrite
self._state_visibility = StateTtlConfig.StateVisibility.NeverReturnExpired
self._ttl_time_characteristic = StateTtlConfig.TtlTimeCharacteristic.ProcessingTime
self._is_cleanup_in_background = True
self._strategies = {} # type: Dict
def set_update_type(self,
update_type: 'StateTtlConfig.UpdateType') -> 'StateTtlConfig.Builder':
"""
Sets the ttl update type.
:param update_type: The ttl update type configures when to update last access timestamp
which prolongs state TTL.
"""
self._update_type = update_type
return self
def update_ttl_on_create_and_write(self) -> 'StateTtlConfig.Builder':
return self.set_update_type(StateTtlConfig.UpdateType.OnCreateAndWrite)
def update_ttl_on_read_and_write(self) -> 'StateTtlConfig.Builder':
return self.set_update_type(StateTtlConfig.UpdateType.OnReadAndWrite)
def set_state_visibility(
self,
state_visibility: 'StateTtlConfig.StateVisibility') -> 'StateTtlConfig.Builder':
"""
Sets the state visibility.
:param state_visibility: The state visibility configures whether expired user value can
be returned or not.
"""
self._state_visibility = state_visibility
return self
def return_expired_if_not_cleaned_up(self) -> 'StateTtlConfig.Builder':
return self.set_state_visibility(
StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp)
def never_return_expired(self) -> 'StateTtlConfig.Builder':
return self.set_state_visibility(StateTtlConfig.StateVisibility.NeverReturnExpired)
def set_ttl_time_characteristic(
self,
ttl_time_characteristic: 'StateTtlConfig.TtlTimeCharacteristic') \
-> 'StateTtlConfig.Builder':
"""
Sets the time characteristic.
:param ttl_time_characteristic: The time characteristic configures time scale to use for
ttl.
"""
self._ttl_time_characteristic = ttl_time_characteristic
return self
def use_processing_time(self) -> 'StateTtlConfig.Builder':
return self.set_ttl_time_characteristic(
StateTtlConfig.TtlTimeCharacteristic.ProcessingTime)
def cleanup_full_snapshot(self) -> 'StateTtlConfig.Builder':
"""
Cleanup expired state in full snapshot on checkpoint.
"""
self._strategies[
StateTtlConfig.CleanupStrategies.Strategies.FULL_STATE_SCAN_SNAPSHOT] = \
StateTtlConfig.CleanupStrategies.EMPTY_STRATEGY
return self
def cleanup_incrementally(self,
cleanup_size: int,
run_cleanup_for_every_record) -> 'StateTtlConfig.Builder':
"""
Cleanup expired state incrementally in local state.
Upon every state access this cleanup strategy checks a bunch of state keys for
expiration and cleans up expired ones. It keeps a lazy iterator through all keys with
relaxed consistency if the backend supports it. This way all keys should be regularly
checked and cleaned eventually over time if any state is constantly being accessed.
In addition to the incremental cleanup upon state access, it can also run for every
record. Caution: if there are a lot of registered states using this option, they all
will be iterated for every record to check if there is something to clean up.
If no access happens to this state or no records are processed in case of
run_cleanup_for_every_record, expired state will persist.
Time spent for the incremental cleanup increases record processing latency.
Note:
At the moment incremental cleanup is implemented only for Heap state backend.
Setting it for RocksDB will have no effect.
Note:
If heap state backend is used with synchronous snapshotting, the global iterator keeps a
copy of all keys while iterating because of its specific implementation which does not
support concurrent modifications. Enabling of this feature will increase memory
consumption then. Asynchronous snapshotting does not have this problem.
:param cleanup_size: max number of keys pulled from queue for clean up upon state touch
for any key
:param run_cleanup_for_every_record: run incremental cleanup per each processed record
"""
self._strategies[StateTtlConfig.CleanupStrategies.Strategies.INCREMENTAL_CLEANUP] = \
StateTtlConfig.CleanupStrategies.IncrementalCleanupStrategy(
cleanup_size, run_cleanup_for_every_record)
return self
def cleanup_in_rocksdb_compact_filter(
self,
query_time_after_num_entries) -> 'StateTtlConfig.Builder':
"""
Cleanup expired state while Rocksdb compaction is running.
RocksDB compaction filter will query the current timestamp, used to check expiration, from
Flink every time after processing ``query_time_after_num_entries`` number of state
entries. Updating the timestamp more often can improve cleanup speed but it decreases
compaction performance because it uses a JNI call from native code.
:param query_time_after_num_entries: number of state entries to process by compaction
filter before updating current timestamp
:return:
"""
self._strategies[
StateTtlConfig.CleanupStrategies.Strategies.ROCKSDB_COMPACTION_FILTER] = \
StateTtlConfig.CleanupStrategies.RocksdbCompactFilterCleanupStrategy(
query_time_after_num_entries)
return self
def disable_cleanup_in_background(self) -> 'StateTtlConfig.Builder':
"""
Disable default cleanup of expired state in background (enabled by default).
If some specific cleanup is configured, e.g. :func:`cleanup_incrementally` or
:func:`cleanup_in_rocksdb_compact_filter`, this setting does not disable it.
"""
self._is_cleanup_in_background = False
return self
def set_ttl(self, ttl: Time) -> 'StateTtlConfig.Builder':
"""
Sets the ttl time.
:param ttl: The ttl time.
"""
self._ttl = ttl
return self
def build(self) -> 'StateTtlConfig':
return StateTtlConfig(
self._update_type,
self._state_visibility,
self._ttl_time_characteristic,
self._ttl,
StateTtlConfig.CleanupStrategies(self._strategies, self._is_cleanup_in_background)
)
class CleanupStrategies(object):
"""
TTL cleanup strategies.
This class configures when to cleanup expired state with TTL. By default, state is always
cleaned up on explicit read access if found expired. Currently cleanup of state full
snapshot can be additionally activated.
"""
class Strategies(Enum):
"""
Fixed strategies ordinals in strategies config field.
"""
FULL_STATE_SCAN_SNAPSHOT = 0
INCREMENTAL_CLEANUP = 1
ROCKSDB_COMPACTION_FILTER = 2
def _to_proto(self):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
return getattr(
StateDescriptor.StateTTLConfig.CleanupStrategies.Strategies, self.name)
@staticmethod
def _from_proto(proto):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
strategies_name = \
StateDescriptor.StateTTLConfig.CleanupStrategies.Strategies.Name(proto)
return StateTtlConfig.CleanupStrategies.Strategies[strategies_name]
class CleanupStrategy(ABC):
"""
Base interface for cleanup strategies configurations.
"""
pass
class EmptyCleanupStrategy(CleanupStrategy):
pass
class IncrementalCleanupStrategy(CleanupStrategy):
"""
Configuration of the incremental cleanup strategy.
"""
def __init__(self, cleanup_size: int, run_cleanup_for_every_record: bool):
self._cleanup_size = cleanup_size
self._run_cleanup_for_every_record = run_cleanup_for_every_record
def get_cleanup_size(self) -> int:
return self._cleanup_size
def run_cleanup_for_every_record(self) -> bool:
return self._run_cleanup_for_every_record
class RocksdbCompactFilterCleanupStrategy(CleanupStrategy):
"""
Configuration of cleanup strategy using custom compaction filter in RocksDB.
"""
def __init__(self, query_time_after_num_entries: int):
self._query_time_after_num_entries = query_time_after_num_entries
def get_query_time_after_num_entries(self) -> int:
return self._query_time_after_num_entries
EMPTY_STRATEGY = EmptyCleanupStrategy()
def __init__(self,
strategies: Dict[Strategies, CleanupStrategy],
is_cleanup_in_background: bool):
self._strategies = strategies
self._is_cleanup_in_background = is_cleanup_in_background
def is_cleanup_in_background(self) -> bool:
return self._is_cleanup_in_background
def in_full_snapshot(self) -> bool:
return (StateTtlConfig.CleanupStrategies.Strategies.FULL_STATE_SCAN_SNAPSHOT in
self._strategies)
def get_incremental_cleanup_strategy(self) \
-> 'StateTtlConfig.CleanupStrategies.IncrementalCleanupStrategy':
if self._is_cleanup_in_background:
default_strategy = \
StateTtlConfig.CleanupStrategies.IncrementalCleanupStrategy(5, False)
else:
default_strategy = None
return self._strategies.get( # type: ignore
StateTtlConfig.CleanupStrategies.Strategies.INCREMENTAL_CLEANUP,
default_strategy)
def get_rocksdb_compact_filter_cleanup_strategy(self) \
-> 'StateTtlConfig.CleanupStrategies.RocksdbCompactFilterCleanupStrategy':
if self._is_cleanup_in_background:
default_strategy = \
StateTtlConfig.CleanupStrategies.RocksdbCompactFilterCleanupStrategy(1000)
else:
default_strategy = None
return self._strategies.get( # type: ignore
StateTtlConfig.CleanupStrategies.Strategies.ROCKSDB_COMPACTION_FILTER,
default_strategy)
def _to_proto(self):
from pyflink.fn_execution.flink_fn_execution_pb2 import StateDescriptor
DescriptorCleanupStrategies = StateDescriptor.StateTTLConfig.CleanupStrategies
CleanupStrategies = StateTtlConfig.CleanupStrategies
cleanup_strategies = StateDescriptor.StateTTLConfig.CleanupStrategies()
cleanup_strategies.is_cleanup_in_background = self._is_cleanup_in_background
for k, v in self._strategies.items():
cleanup_strategy = cleanup_strategies.strategies.add()
cleanup_strategy.strategy = k._to_proto()
if isinstance(v, CleanupStrategies.EmptyCleanupStrategy):
empty_strategy = DescriptorCleanupStrategies.EmptyCleanupStrategy.EMPTY_STRATEGY
cleanup_strategy.empty_strategy = empty_strategy
elif isinstance(v, CleanupStrategies.IncrementalCleanupStrategy):
incremental_cleanup_strategy = \
DescriptorCleanupStrategies.IncrementalCleanupStrategy()
incremental_cleanup_strategy.cleanup_size = v._cleanup_size
incremental_cleanup_strategy.run_cleanup_for_every_record = \
v._run_cleanup_for_every_record
cleanup_strategy.incremental_cleanup_strategy.CopyFrom(
incremental_cleanup_strategy)
elif isinstance(v, CleanupStrategies.RocksdbCompactFilterCleanupStrategy):
rocksdb_compact_filter_cleanup_strategy = \
DescriptorCleanupStrategies.RocksdbCompactFilterCleanupStrategy()
rocksdb_compact_filter_cleanup_strategy.query_time_after_num_entries = \
v._query_time_after_num_entries
cleanup_strategy.rocksdb_compact_filter_cleanup_strategy.CopyFrom(
rocksdb_compact_filter_cleanup_strategy)
return cleanup_strategies
@staticmethod
def _from_proto(proto):
CleanupStrategies = StateTtlConfig.CleanupStrategies
strategies = {}
is_cleanup_in_background = proto.is_cleanup_in_background
for strategy_entry in proto.strategies:
strategy = CleanupStrategies.Strategies._from_proto(strategy_entry.strategy)
if strategy_entry.HasField('empty_strategy'):
strategies[strategy] = CleanupStrategies.EmptyCleanupStrategy
elif strategy_entry.HasField('incremental_cleanup_strategy'):
incremental_cleanup_strategy = strategy_entry.incremental_cleanup_strategy
strategies[strategy] = CleanupStrategies.IncrementalCleanupStrategy(
incremental_cleanup_strategy.cleanup_size,
incremental_cleanup_strategy.run_cleanup_for_every_record)
elif strategy_entry.HasField('rocksdb_compact_filter_cleanup_strategy'):
rocksdb_compact_filter_cleanup_strategy = \
strategy_entry.rocksdb_compact_filter_cleanup_strategy
strategies[strategy] = CleanupStrategies.RocksdbCompactFilterCleanupStrategy(
rocksdb_compact_filter_cleanup_strategy.query_time_after_num_entries)
return CleanupStrategies(strategies, is_cleanup_in_background)
| 38,741 | 37.320475 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/functions.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from py4j.java_gateway import JavaObject
from typing import Union, Any, Generic, TypeVar, Iterable
from pyflink.datastream.state import ValueState, ValueStateDescriptor, ListStateDescriptor, \
ListState, MapStateDescriptor, MapState, ReducingStateDescriptor, ReducingState, \
AggregatingStateDescriptor, AggregatingState, BroadcastState, ReadOnlyBroadcastState
from pyflink.datastream.time_domain import TimeDomain
from pyflink.datastream.timerservice import TimerService
from pyflink.java_gateway import get_gateway
from pyflink.metrics import MetricGroup
__all__ = [
'RuntimeContext',
'MapFunction',
'CoMapFunction',
'FlatMapFunction',
'CoFlatMapFunction',
'ReduceFunction',
'AggregateFunction',
'KeySelector',
'FilterFunction',
'Partitioner',
'SourceFunction',
'SinkFunction',
'ProcessFunction',
'CoProcessFunction',
'KeyedProcessFunction',
'KeyedCoProcessFunction',
'TimerService',
'WindowFunction',
'AllWindowFunction',
'ProcessWindowFunction',
'ProcessAllWindowFunction',
'BaseBroadcastProcessFunction',
'BroadcastProcessFunction',
'KeyedBroadcastProcessFunction',
]
W = TypeVar('W')
W2 = TypeVar('W2')
IN = TypeVar('IN')
IN1 = TypeVar('IN1')
IN2 = TypeVar('IN2')
OUT = TypeVar('OUT')
KEY = TypeVar('KEY')
class KeyedStateStore(ABC):
@abstractmethod
def get_state(self, state_descriptor: ValueStateDescriptor) -> ValueState:
"""
        Gets a handle to the system's key/value state. The key/value state is only accessible if the
function is executed on a KeyedStream. On each access, the state exposes the value for the
key of the element currently processed by the function. Each function may have multiple
partitioned states, addressed with different names.
Because the scope of each value is the key of the currently processed element, and the
elements are distributed by the Flink runtime, the system can transparently scale out and
redistribute the state and KeyedStream.
"""
pass
@abstractmethod
def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListState:
"""
Gets a handle to the system's key/value list state. This state is similar to the value state
access, but is optimized for state that holds lists. One can add elements to the list, or
        retrieve the list as a whole.
This state is only accessible if the function is executed on a KeyedStream.
"""
pass
@abstractmethod
def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapState:
"""
Gets a handle to the system's key/value map state. This state is similar to the value state
access, but is optimized for state that is composed of user-defined key-value pairs.
This state is only accessible if the function is executed on a KeyedStream.
"""
pass
@abstractmethod
def get_reducing_state(self, state_descriptor: ReducingStateDescriptor) -> ReducingState:
"""
Gets a handle to the system's key/value reducing state. This state is similar to the state
accessed via get_state(ValueStateDescriptor), but is optimized for state that aggregates
values.
This state is only accessible if the function is executed on a KeyedStream.
"""
pass
@abstractmethod
def get_aggregating_state(
self, state_descriptor: AggregatingStateDescriptor) -> AggregatingState:
"""
Gets a handle to the system's key/value aggregating state. This state is similar to the
state accessed via get_state(ValueStateDescriptor), but is optimized for state that
aggregates values with different types.
This state is only accessible if the function is executed on a KeyedStream.
"""
pass
class RuntimeContext(KeyedStateStore):
"""
A RuntimeContext contains information about the context in which functions are executed.
Each parallel instance of the function will have a context through which it can access
static contextual information (such as the current parallelism).
"""
@abstractmethod
def get_task_name(self) -> str:
"""
Returns the name of the task in which the UDF runs, as assigned during plan construction.
"""
pass
@abstractmethod
def get_number_of_parallel_subtasks(self) -> int:
"""
Gets the parallelism with which the parallel task runs.
"""
pass
@abstractmethod
def get_max_number_of_parallel_subtasks(self) -> int:
"""
Gets the number of max-parallelism with which the parallel task runs.
"""
pass
@abstractmethod
def get_index_of_this_subtask(self) -> int:
"""
Gets the number of this parallel subtask. The numbering starts from 0 and goes up to
parallelism-1 (parallelism as returned by
:func:`~RuntimeContext.get_number_of_parallel_subtasks`).
"""
pass
@abstractmethod
def get_attempt_number(self) -> int:
"""
Gets the attempt number of this parallel subtask. First attempt is numbered 0.
"""
pass
@abstractmethod
def get_task_name_with_subtasks(self) -> str:
"""
Returns the name of the task, appended with the subtask indicator, such as "MyTask (3/6)",
where 3 would be (:func:`~RuntimeContext.get_index_of_this_subtask` + 1), and 6 would be
:func:`~RuntimeContext.get_number_of_parallel_subtasks`.
"""
pass
@abstractmethod
def get_job_parameter(self, key: str, default_value: str):
"""
Gets the global job parameter value associated with the given key as a string.
"""
pass
@abstractmethod
def get_metrics_group(self) -> MetricGroup:
"""
Gets the metric group.
"""
pass
class Function(ABC):
"""
The base class for all user-defined functions.
"""
def open(self, runtime_context: RuntimeContext):
pass
def close(self):
pass
class MapFunction(Function):
"""
Base class for Map functions. Map functions take elements and transform them, element wise. A
Map function always produces a single result element for each input element. Typical
applications are parsing elements, converting data types, or projecting out fields. Operations
that produce multiple result elements from a single input element can be implemented using the
FlatMapFunction.
The basic syntax for using a MapFunction is as follows:
::
>>> ds = ...
>>> new_ds = ds.map(MyMapFunction())
"""
@abstractmethod
def map(self, value):
"""
The mapping method. Takes an element from the input data and transforms it into exactly one
element.
:param value: The input value.
:return: The transformed value.
"""
pass
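# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a minimal
# MapFunction implementation. It assumes the incoming elements are strings of
# the form "name,count" and parses them into (str, int) tuples; the field
# layout is purely illustrative.
# ---------------------------------------------------------------------------
class _ExampleParseCsvLine(MapFunction):

    def map(self, value):
        # split the line once and convert the second field to an int
        name, count = value.split(',', 1)
        return name, int(count)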
class CoMapFunction(Function):
"""
A CoMapFunction implements a map() transformation over two connected streams.
The same instance of the transformation function is used to transform both of
the connected streams. That way, the stream transformations can share state.
The basic syntax for using a CoMapFunction is as follows:
::
>>> ds1 = ...
>>> ds2 = ...
>>> new_ds = ds1.connect(ds2).map(MyCoMapFunction())
"""
@abstractmethod
def map1(self, value):
"""
This method is called for each element in the first of the connected streams.
:param value: The stream element
:return: The resulting element
"""
pass
@abstractmethod
def map2(self, value):
"""
This method is called for each element in the second of the connected streams.
:param value: The stream element
:return: The resulting element
"""
pass
class FlatMapFunction(Function):
"""
    Base class for flatMap functions. FlatMap functions take elements and transform them into zero,
    one, or more elements. Typical applications can be splitting elements, or unnesting lists and
    arrays. Operations that produce strictly one result element per input element can also use the
    MapFunction.
    The basic syntax for using a FlatMapFunction is as follows:
::
>>> ds = ...
>>> new_ds = ds.flat_map(MyFlatMapFunction())
"""
@abstractmethod
def flat_map(self, value):
"""
        The core method of the FlatMapFunction. Takes an element from the input data and transforms
it into zero, one, or more elements.
A basic implementation of flat map is as follows:
::
>>> class MyFlatMapFunction(FlatMapFunction):
>>> def flat_map(self, value):
>>> for i in range(value):
>>> yield i
:param value: The input value.
:return: A generator
"""
pass
class CoFlatMapFunction(Function):
"""
A CoFlatMapFunction implements a flat-map transformation over two connected streams.
The same instance of the transformation function is used to transform both of the
connected streams. That way, the stream transformations can share state.
An example for the use of connected streams would be to apply rules that change over time
onto elements of a stream. One of the connected streams has the rules, the other stream the
elements to apply the rules to. The operation on the connected stream maintains the
current set of rules in the state. It may receive either a rule update (from the first stream)
and update the state, or a data element (from the second stream) and apply the rules in the
state to the element. The result of applying the rules would be emitted.
The basic syntax for using a CoFlatMapFunction is as follows:
::
>>> ds1 = ...
>>> ds2 = ...
>>> class MyCoFlatMapFunction(CoFlatMapFunction):
>>> def flat_map1(self, value):
>>> for i in range(value):
>>> yield i
>>> def flat_map2(self, value):
>>> for i in range(value):
>>> yield i
>>> new_ds = ds1.connect(ds2).flat_map(MyCoFlatMapFunction())
"""
@abstractmethod
def flat_map1(self, value):
"""
This method is called for each element in the first of the connected streams.
:param value: The input value.
:return: A generator
"""
pass
@abstractmethod
def flat_map2(self, value):
"""
This method is called for each element in the second of the connected streams.
:param value: The input value.
:return: A generator
"""
pass
class ReduceFunction(Function):
"""
Base interface for Reduce functions. Reduce functions combine groups of elements to a single
    value, by repeatedly taking two elements and combining them into one. Reduce functions may be
used on entire data sets, or on grouped data sets. In the latter case, each group is reduced
individually.
The basic syntax for using a ReduceFunction is as follows:
::
>>> ds = ...
>>> new_ds = ds.key_by(lambda x: x[1]).reduce(MyReduceFunction())
"""
@abstractmethod
def reduce(self, value1, value2):
"""
The core method of ReduceFunction, combining two values into one value of the same type.
The reduce function is consecutively applied to all values of a group until only a single
value remains.
:param value1: The first value to combine.
:param value2: The second value to combine.
:return: The combined value of both input values.
"""
pass
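# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a ReduceFunction
# that sums the second field of (key, count) tuples while keeping the key of
# the first element. The tuple layout is an illustrative assumption.
# ---------------------------------------------------------------------------
class _ExampleSumReduce(ReduceFunction):

    def reduce(self, value1, value2):
        # both values belong to the same key group, so the key can be reused
        return value1[0], value1[1] + value2[1]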
class AggregateFunction(Function):
"""
The AggregateFunction is a flexible aggregation function, characterized by the following
features:
- The aggregates may use different types for input values, intermediate aggregates, and
result type, to support a wide range of aggregation types.
- Support for distributive aggregations: Different intermediate aggregates can be merged
together, to allow for pre-aggregation/final-aggregation optimizations.
The AggregateFunction's intermediate aggregate (in-progress aggregation state) is called the
`accumulator`. Values are added to the accumulator, and final aggregates are obtained by
finalizing the accumulator state. This supports aggregation functions where the intermediate
state needs to be different than the aggregated values and the final result type, such as for
example average (which typically keeps a count and sum). Merging intermediate aggregates
(partial aggregates) means merging the accumulators.
    The AggregateFunction itself is stateless. To allow a single AggregateFunction instance to
    maintain multiple aggregates (such as one aggregate per key), the AggregateFunction creates a
    new accumulator whenever a new aggregation is started.
"""
@abstractmethod
def create_accumulator(self):
"""
Creates a new accumulator, starting a new aggregate.
The new accumulator is typically meaningless unless a value is added via
:func:`~AggregateFunction.add`.
The accumulator is the state of a running aggregation. When a program has multiple
aggregates in progress (such as per key and window), the state (per key and window) is the
size of the accumulator.
:return: A new accumulator, corresponding to an empty aggregate.
"""
pass
@abstractmethod
def add(self, value, accumulator):
"""
Adds the given input value to the given accumulator, returning the new accumulator value.
For efficiency, the input accumulator may be modified and returned.
:param value: The value to add.
:param accumulator: The accumulator to add the value to.
:return: The accumulator with the updated state.
"""
pass
@abstractmethod
def get_result(self, accumulator):
"""
Gets the result of the aggregation from the accumulator.
:param accumulator: The accumulator of the aggregation.
:return: The final aggregation result.
"""
pass
@abstractmethod
def merge(self, acc_a, acc_b):
"""
Merges two accumulators, returning an accumulator with the merged state.
This function may reuse any of the given accumulators as the target for the merge and
return that. The assumption is that the given accumulators will not be used any more after
having been passed to this function.
:param acc_a: An accumulator to merge.
:param acc_b: Another accumulator to merge.
:return: The accumulator with the merged state.
"""
pass
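# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): an
# AggregateFunction computing a running average. It assumes the input elements
# are (key, value) tuples and uses a (sum, count) pair as the accumulator.
# ---------------------------------------------------------------------------
class _ExampleAverageAggregate(AggregateFunction):

    def create_accumulator(self):
        # (running sum, element count)
        return 0.0, 0

    def add(self, value, accumulator):
        acc_sum, acc_count = accumulator
        return acc_sum + value[1], acc_count + 1

    def get_result(self, accumulator):
        acc_sum, acc_count = accumulator
        return acc_sum / acc_count if acc_count else 0.0

    def merge(self, acc_a, acc_b):
        return acc_a[0] + acc_b[0], acc_a[1] + acc_b[1]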
class KeySelector(Function):
"""
    The KeySelector allows using deterministic objects for operations such as reduce, reduceGroup,
    join, coGroup, etc. If invoked multiple times on the same object, the returned key must be the
    same. The extractor takes an object and returns the deterministic key for that object.
"""
@abstractmethod
def get_key(self, value):
"""
User-defined function that deterministically extracts the key from an object.
:param value: The object to get the key from.
:return: The extracted key.
"""
pass
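# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a KeySelector
# that keys a stream of tuples by their first field. Returning the same key for
# the same object on every invocation satisfies the determinism requirement
# described above.
# ---------------------------------------------------------------------------
class _ExampleFirstFieldKeySelector(KeySelector):

    def get_key(self, value):
        return value[0]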
class NullByteKeySelector(KeySelector):
"""
Used as a dummy KeySelector to allow using keyed operators for non-keyed use cases. Essentially,
it gives all incoming records the same key, which is a (byte) 0 value.
"""
def get_key(self, value):
return 0
class FilterFunction(Function):
"""
A filter function is a predicate applied individually to each record. The predicate decides
whether to keep the element, or to discard it.
The basic syntax for using a FilterFunction is as follows:
::
>>> ds = ...
>>> result = ds.filter(MyFilterFunction())
Note that the system assumes that the function does not modify the elements on which the
predicate is applied. Violating this assumption can lead to incorrect results.
"""
@abstractmethod
def filter(self, value):
"""
The filter function that evaluates the predicate.
:param value: The value to be filtered.
:return: True for values that should be retained, false for values to be filtered out.
"""
pass
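# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a FilterFunction
# that drops None or empty elements. It assumes the elements are sized
# containers such as strings or lists; the predicate itself is illustrative.
# ---------------------------------------------------------------------------
class _ExampleNonEmptyFilter(FilterFunction):

    def filter(self, value):
        # keep the element only if it is present and non-empty
        return value is not None and len(value) > 0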
class Partitioner(Function):
"""
Function to implement a custom partition assignment for keys.
"""
@abstractmethod
def partition(self, key: Any, num_partitions: int) -> int:
"""
Computes the partition for the given key.
:param key: The key.
:param num_partitions: The number of partitions to partition into.
:return: The partition index.
"""
pass
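# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a Partitioner
# that spreads keys over partitions with Python's built-in hash(). Note that a
# production partitioner would typically use a stable hash, since hash() of
# str values varies between interpreter runs unless PYTHONHASHSEED is fixed.
# ---------------------------------------------------------------------------
class _ExampleHashPartitioner(Partitioner):

    def partition(self, key: Any, num_partitions: int) -> int:
        return hash(key) % num_partitions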
class FunctionWrapper(Function):
"""
A basic wrapper class for user defined function.
"""
def __init__(self, func):
self._func = func
class ReduceFunctionWrapper(FunctionWrapper):
"""
    A wrapper class for ReduceFunction. It's used for wrapping up a user-defined function in a
    ReduceFunction when the user does not implement a ReduceFunction but directly passes a function
    object or a lambda function to the reduce() function.
"""
def __init__(self, func):
"""
The constructor of ReduceFunctionWrapper.
:param func: user defined function object.
"""
super(ReduceFunctionWrapper, self).__init__(func)
def reduce(self, value1, value2):
"""
A delegated reduce function to invoke user defined function.
:param value1: The first value to combine.
:param value2: The second value to combine.
:return: The combined value of both input values.
"""
return self._func(value1, value2)
def _get_python_env():
"""
    A utility function to get a Python user-defined function execution environment.
"""
gateway = get_gateway()
exec_type = gateway.jvm.org.apache.flink.table.functions.python.PythonEnv.ExecType.PROCESS
return gateway.jvm.org.apache.flink.table.functions.python.PythonEnv(exec_type)
class JavaFunctionWrapper(object):
"""
A wrapper class that maintains a Function implemented in Java.
"""
def __init__(self, j_function: Union[str, JavaObject]):
if isinstance(j_function, str):
j_func_class = get_gateway().jvm.__getattr__(j_function)
j_function = j_func_class()
self._j_function = j_function
def get_java_function(self):
return self._j_function
class SourceFunction(JavaFunctionWrapper):
"""
Base class for all stream data source in Flink.
"""
def __init__(self, source_func: Union[str, JavaObject]):
"""
        Constructor of SourceFunction.
        :param source_func: The java SourceFunction object or the full name of the
                            SourceFunction class.
"""
super(SourceFunction, self).__init__(source_func)
class SinkFunction(JavaFunctionWrapper):
"""
The base class for SinkFunctions.
"""
def __init__(self, sink_func: Union[str, JavaObject]):
"""
Constructor of SinkFunction.
:param sink_func: The java SinkFunction object or the full name of the SinkFunction class.
"""
super(SinkFunction, self).__init__(sink_func)
class ProcessFunction(Function):
"""
A function that process elements of a stream.
For every element in the input stream process_element(value, ctx, out) is invoked. This can
produce zero or more elements as output. Implementations can also query the time and set timers
through the provided Context. For firing timers on_timer(long, ctx, out) will be invoked. This
can again produce zero or more elements as output and register further timers.
Note that access to keyed state and timers (which are also scoped to a key) is only available if
the ProcessFunction is applied on a KeyedStream.
"""
class Context(ABC):
"""
Information available in an invocation of process_element(value, ctx, out) or
on_timer(value, ctx, out).
"""
@abstractmethod
def timer_service(self) -> TimerService:
"""
A Timer service for querying time and registering timers.
"""
pass
@abstractmethod
def timestamp(self) -> int:
"""
Timestamp of the element currently being processed or timestamp of a firing timer.
This might be None, for example if the time characteristic of your program is set to
            TimeCharacteristic.ProcessingTime.
"""
pass
@abstractmethod
def process_element(self, value, ctx: 'ProcessFunction.Context'):
"""
Process one element from the input stream.
This function can output zero or more elements using the Collector parameter and also update
internal state or set timers using the Context parameter.
:param value: The input value.
:param ctx: A Context that allows querying the timestamp of the element and getting a
TimerService for registering timers and querying the time. The context is only
valid during the invocation of this method, do not store it.
"""
pass
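# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a ProcessFunction
# that forwards every element together with its timestamp. ctx.timestamp() may
# be None when the job runs with processing-time semantics, which the sketch
# simply passes through.
# ---------------------------------------------------------------------------
class _ExampleAttachTimestamp(ProcessFunction):

    def process_element(self, value, ctx: 'ProcessFunction.Context'):
        yield value, ctx.timestamp()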
class KeyedProcessFunction(Function):
"""
A keyed function processes elements of a stream.
For every element in the input stream, process_element() is invoked. This can produce zero or
more elements as output. Implementations can also query the time and set timers through the
provided Context. For firing timers on_timer() will be invoked. This can again produce zero or
more elements as output and register further timers.
Note that access to keyed state and timers (which are also scoped to a key) is only available if
the KeyedProcessFunction is applied on a KeyedStream.
"""
class Context(ABC):
@abstractmethod
def get_current_key(self):
pass
@abstractmethod
def timer_service(self) -> TimerService:
"""
A Timer service for querying time and registering timers.
"""
pass
@abstractmethod
def timestamp(self) -> int:
"""
Timestamp of the element currently being processed or timestamp of a firing timer.
This might be None, for example if the time characteristic of your program is set to
            TimeCharacteristic.ProcessingTime.
"""
pass
class OnTimerContext(Context):
@abstractmethod
def time_domain(self) -> TimeDomain:
"""
The TimeDomain of the firing timer.
:return: The TimeDomain of current fired timer.
"""
pass
@abstractmethod
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
"""
Process one element from the input stream.
This function can output zero or more elements and also update
internal state or set timers using the Context parameter.
:param value: The input value.
:param ctx: A Context that allows querying the timestamp of the element and getting a
TimerService for registering timers and querying the time. The context is only
valid during the invocation of this method, do not store it.
"""
pass
def on_timer(self, timestamp: int, ctx: 'KeyedProcessFunction.OnTimerContext'):
"""
Called when a timer set using TimerService fires.
:param timestamp: The timestamp of the firing timer.
:param ctx: An OnTimerContext that allows querying the timestamp of the firing timer,
querying the TimeDomain of the firing timer and getting a TimerService for
registering timers and querying the time. The context is only valid during the
invocation of this method, do not store it.
"""
pass
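# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a
# KeyedProcessFunction that keeps a per-key count in ValueState and registers a
# processing-time timer one second after each element. The state name "count",
# the one-second delay and the use of Types.LONG() are illustrative
# assumptions; the timer calls follow the PyFlink TimerService interface.
# ---------------------------------------------------------------------------
class _ExampleCountWithTimer(KeyedProcessFunction):

    def __init__(self):
        self._count_state = None

    def open(self, runtime_context: RuntimeContext):
        # local import keeps the sketch self-contained (Types is not imported
        # by this module)
        from pyflink.common.typeinfo import Types
        self._count_state = runtime_context.get_state(
            ValueStateDescriptor("count", Types.LONG()))

    def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
        current = self._count_state.value() or 0
        self._count_state.update(current + 1)
        # fire a timer one second (1000 ms) after the current processing time
        ctx.timer_service().register_processing_time_timer(
            ctx.timer_service().current_processing_time() + 1000)

    def on_timer(self, timestamp: int, ctx: 'KeyedProcessFunction.OnTimerContext'):
        # emit the key together with the count accumulated so far
        yield ctx.get_current_key(), self._count_state.value()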
class CoProcessFunction(Function):
"""
A function that processes elements of two streams and produces a single output one.
The function will be called for every element in the input streams and can produce zero or
more output elements. Contrary to the :class:`CoFlatMapFunction`, this function can also query
the time (both event and processing) and set timers, through the provided
:class:`CoProcessFunction.Context`. When reacting to the firing of set timers the function can
emit yet more elements.
An example use-case for connected streams would be the application of a set of rules that
change over time ({@code stream A}) to the elements contained in another stream (stream {@code
B}). The rules contained in {@code stream A} can be stored in the state and wait for new
elements to arrive on {@code stream B}. Upon reception of a new element on {@code stream B},
the function can now apply the previously stored rules to the element and directly emit a
result, and/or register a timer that will trigger an action in the future.
"""
class Context(ABC):
@abstractmethod
def timer_service(self) -> TimerService:
"""
A Timer service for querying time and registering timers.
"""
pass
@abstractmethod
def timestamp(self) -> int:
"""
Timestamp of the element currently being processed or timestamp of a firing timer.
This might be None, for example if the time characteristic of your program is set to
            TimeCharacteristic.ProcessingTime.
"""
pass
@abstractmethod
def process_element1(self, value, ctx: 'CoProcessFunction.Context'):
"""
This method is called for each element in the first of the connected streams.
This function can output zero or more elements using the Collector parameter and also update
internal state or set timers using the Context parameter.
:param value: The input value.
:param ctx: A Context that allows querying the timestamp of the element and getting a
TimerService for registering timers and querying the time. The context is only
valid during the invocation of this method, do not store it.
"""
pass
@abstractmethod
def process_element2(self, value, ctx: 'CoProcessFunction.Context'):
"""
This method is called for each element in the second of the connected streams.
This function can output zero or more elements using the Collector parameter and also update
internal state or set timers using the Context parameter.
:param value: The input value.
:param ctx: A Context that allows querying the timestamp of the element and getting a
TimerService for registering timers and querying the time. The context is only
valid during the invocation of this method, do not store it.
"""
pass
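# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a
# CoProcessFunction that tags every element with the stream it came from, a
# minimal way to observe how the two connected inputs interleave.
# ---------------------------------------------------------------------------
class _ExampleTagStreams(CoProcessFunction):

    def process_element1(self, value, ctx: 'CoProcessFunction.Context'):
        yield 'first', value

    def process_element2(self, value, ctx: 'CoProcessFunction.Context'):
        yield 'second', value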
class KeyedCoProcessFunction(Function):
"""
A function that processes elements of two keyed streams and produces a single output one.
The function will be called for every element in the input streams and can produce zero or
more output elements. Contrary to the :class:`CoFlatMapFunction`, this function can also query the
time (both event and processing) and set timers, through the provided {@link Context}. When
reacting to the firing of set timers the function can emit yet more elements.
An example use-case for connected streams would be the application of a set of rules that
change over time ({@code stream A}) to the elements contained in another stream (stream {@code
B}). The rules contained in {@code stream A} can be stored in the state and wait for new elements
to arrive on {@code stream B}. Upon reception of a new element on {@code stream B}, the function
can now apply the previously stored rules to the element and directly emit a result, and/or
register a timer that will trigger an action in the future.
"""
class Context(ABC):
@abstractmethod
def get_current_key(self):
pass
@abstractmethod
def timer_service(self) -> TimerService:
"""
A Timer service for querying time and registering timers.
"""
pass
@abstractmethod
def timestamp(self) -> int:
"""
Timestamp of the element currently being processed or timestamp of a firing timer.
This might be None, for example if the time characteristic of your program is set to
            TimeCharacteristic.ProcessingTime.
"""
pass
class OnTimerContext(Context):
@abstractmethod
def time_domain(self) -> TimeDomain:
"""
The TimeDomain of the firing timer.
:return: The TimeDomain of current fired timer.
"""
pass
@abstractmethod
def process_element1(self, value, ctx: 'KeyedCoProcessFunction.Context'):
"""
Process one element from the input stream.
This function can output zero or more elements using the Collector parameter and also update
internal state or set timers using the Context parameter.
:param value: The input value.
:param ctx: A Context that allows querying the timestamp of the element and getting a
TimerService for registering timers and querying the time. The context is only
valid during the invocation of this method, do not store it.
"""
pass
@abstractmethod
def process_element2(self, value, ctx: 'KeyedCoProcessFunction.Context'):
"""
Process one element from the input stream.
This function can output zero or more elements using the Collector parameter and also update
internal state or set timers using the Context parameter.
:param value: The input value.
:param ctx: A Context that allows querying the timestamp of the element and getting a
TimerService for registering timers and querying the time. The context is only
valid during the invocation of this method, do not store it.
"""
pass
def on_timer(self, timestamp: int, ctx: 'KeyedCoProcessFunction.OnTimerContext'):
"""
Called when a timer set using TimerService fires.
:param timestamp: The timestamp of the firing timer.
:param ctx: An OnTimerContext that allows querying the timestamp of the firing timer,
querying the TimeDomain of the firing timer and getting a TimerService for
registering timers and querying the time. The context is only valid during the
invocation of this method, do not store it.
"""
pass
class WindowFunction(Function, Generic[IN, OUT, KEY, W]):
"""
Base interface for functions that are evaluated over keyed (grouped) windows.
"""
@abstractmethod
def apply(self, key: KEY, window: W, inputs: Iterable[IN]) -> Iterable[OUT]:
"""
Evaluates the window and outputs none or several elements.
:param key: The key for which this window is evaluated.
:param window: The window that is being evaluated.
:param inputs: The elements in the window being evaluated.
"""
pass
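# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a WindowFunction
# that emits the number of elements seen for each key and window. The output
# shape (key, count) is an illustrative choice.
# ---------------------------------------------------------------------------
class _ExampleCountWindowFunction(WindowFunction):

    def apply(self, key, window, inputs):
        yield key, sum(1 for _ in inputs)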
class AllWindowFunction(Function, Generic[IN, OUT, W]):
"""
Base interface for functions that are evaluated over non-keyed windows.
"""
@abstractmethod
def apply(self, window: W, inputs: Iterable[IN]) -> Iterable[OUT]:
"""
Evaluates the window and outputs none or several elements.
:param window: The window that is being evaluated.
:param inputs: The elements in the window being evaluated.
"""
pass
class ProcessWindowFunction(Function, Generic[IN, OUT, KEY, W]):
"""
Base interface for functions that are evaluated over keyed (grouped) windows using a context
for retrieving extra information.
"""
class Context(ABC, Generic[W2]):
"""
The context holding window metadata.
"""
@abstractmethod
def window(self) -> W2:
"""
:return: The window that is being evaluated.
"""
pass
@abstractmethod
def current_processing_time(self) -> int:
"""
:return: The current processing time.
"""
pass
@abstractmethod
def current_watermark(self) -> int:
"""
:return: The current event-time watermark.
"""
pass
@abstractmethod
def window_state(self) -> KeyedStateStore:
"""
State accessor for per-key and per-window state.
.. note::
If you use per-window state you have to ensure that you clean it up by implementing
:func:`~ProcessWindowFunction.clear`.
:return: The :class:`KeyedStateStore` used to access per-key and per-window states.
"""
pass
@abstractmethod
def global_state(self) -> KeyedStateStore:
"""
State accessor for per-key global state.
"""
pass
@abstractmethod
def process(self,
key: KEY,
context: 'ProcessWindowFunction.Context',
elements: Iterable[IN]) -> Iterable[OUT]:
"""
Evaluates the window and outputs none or several elements.
:param key: The key for which this window is evaluated.
:param context: The context in which the window is being evaluated.
:param elements: The elements in the window being evaluated.
:return: The iterable object which produces the elements to emit.
"""
pass
def clear(self, context: 'ProcessWindowFunction.Context') -> None:
"""
Deletes any state in the :class:`Context` when the Window expires (the watermark passes its
max_timestamp + allowed_lateness).
:param context: The context to which the window is being evaluated.
"""
pass
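# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a
# ProcessWindowFunction that counts the elements of each keyed window and emits
# the count together with the window object obtained from the context. The
# output layout is illustrative only.
# ---------------------------------------------------------------------------
class _ExampleCountProcessWindowFunction(ProcessWindowFunction):

    def process(self, key, context: 'ProcessWindowFunction.Context', elements):
        yield key, context.window(), sum(1 for _ in elements)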
class ProcessAllWindowFunction(Function, Generic[IN, OUT, W]):
"""
Base interface for functions that are evaluated over non-keyed windows using a context
for retrieving extra information.
"""
class Context(ABC, Generic[W2]):
"""
The context holding window metadata.
"""
@abstractmethod
def window(self) -> W2:
"""
:return: The window that is being evaluated.
"""
pass
@abstractmethod
def window_state(self) -> KeyedStateStore:
"""
State accessor for per-key and per-window state.
.. note::
If you use per-window state you have to ensure that you clean it up by implementing
:func:`~ProcessAllWindowFunction.clear`.
:return: The :class:`KeyedStateStore` used to access per-key and per-window states.
"""
pass
@abstractmethod
def global_state(self) -> KeyedStateStore:
"""
State accessor for per-key global state.
"""
pass
@abstractmethod
def process(self,
context: 'ProcessAllWindowFunction.Context',
elements: Iterable[IN]) -> Iterable[OUT]:
"""
Evaluates the window and outputs none or several elements.
:param context: The context in which the window is being evaluated.
:param elements: The elements in the window being evaluated.
:return: The iterable object which produces the elements to emit.
"""
pass
def clear(self, context: 'ProcessAllWindowFunction.Context') -> None:
"""
Deletes any state in the :class:`Context` when the Window expires (the watermark passes its
max_timestamp + allowed_lateness).
:param context: The context to which the window is being evaluated.
"""
pass
class PassThroughWindowFunction(WindowFunction[IN, IN, KEY, W]):
def apply(self, key: KEY, window: W, inputs: Iterable[IN]) -> Iterable[IN]:
yield from inputs
class PassThroughAllWindowFunction(AllWindowFunction[IN, IN, W]):
def apply(self, window: W, inputs: Iterable[IN]) -> Iterable[IN]:
yield from inputs
class InternalWindowFunction(Function, Generic[IN, OUT, KEY, W]):
class InternalWindowContext(ABC):
@abstractmethod
def current_processing_time(self) -> int:
pass
@abstractmethod
def current_watermark(self) -> int:
pass
@abstractmethod
def window_state(self) -> KeyedStateStore:
pass
@abstractmethod
def global_state(self) -> KeyedStateStore:
pass
@abstractmethod
def process(self,
key: KEY,
window: W,
context: InternalWindowContext,
input_data: IN) -> Iterable[OUT]:
pass
@abstractmethod
def clear(self, window: W, context: InternalWindowContext):
pass
class InternalSingleValueWindowFunction(InternalWindowFunction[IN, OUT, KEY, W]):
def __init__(self, wrapped_function: WindowFunction):
self._wrapped_function = wrapped_function
def open(self, runtime_context: RuntimeContext):
self._wrapped_function.open(runtime_context)
def close(self):
self._wrapped_function.close()
def process(self,
key: KEY,
window: W,
context: InternalWindowFunction.InternalWindowContext,
input_data: IN) -> Iterable[OUT]:
return self._wrapped_function.apply(key, window, [input_data])
def clear(self, window: W, context: InternalWindowFunction.InternalWindowContext):
pass
class InternalSingleValueAllWindowFunction(InternalWindowFunction[IN, OUT, int, W]):
def __init__(self, wrapped_function: AllWindowFunction):
self._wrapped_function = wrapped_function
def open(self, runtime_context: RuntimeContext):
self._wrapped_function.open(runtime_context)
def close(self):
self._wrapped_function.close()
def process(self,
key: int,
window: W,
context: InternalWindowFunction.InternalWindowContext,
input_data: IN) -> Iterable[OUT]:
return self._wrapped_function.apply(window, [input_data])
def clear(self, window: W, context: InternalWindowFunction.InternalWindowContext):
pass
class InternalIterableWindowFunction(InternalWindowFunction[Iterable[IN], OUT, KEY, W]):
def __init__(self, wrapped_function: WindowFunction):
self._wrapped_function = wrapped_function
def open(self, runtime_context: RuntimeContext):
self._wrapped_function.open(runtime_context)
def close(self):
self._wrapped_function.close()
def process(self,
key: KEY,
window: W,
context: InternalWindowFunction.InternalWindowContext,
input_data: Iterable[IN]) -> Iterable[OUT]:
return self._wrapped_function.apply(key, window, input_data)
def clear(self,
window: W,
context: InternalWindowFunction.InternalWindowContext):
pass
class InternalIterableAllWindowFunction(InternalWindowFunction[Iterable[IN], OUT, int, W]):
def __init__(self, wrapped_function: AllWindowFunction):
self._wrapped_function = wrapped_function
def open(self, runtime_context: RuntimeContext):
self._wrapped_function.open(runtime_context)
def close(self):
self._wrapped_function.close()
def process(self,
key: int,
window: W,
context: InternalWindowFunction.InternalWindowContext,
input_data: Iterable[IN]) -> Iterable[OUT]:
return self._wrapped_function.apply(window, input_data)
def clear(self,
window: W,
context: InternalWindowFunction.InternalWindowContext):
pass
class InternalProcessWindowContext(ProcessWindowFunction.Context[W]):
def __init__(self):
self._underlying = None
self._window = None
def window(self) -> W:
return self._window
def current_processing_time(self) -> int:
return self._underlying.current_processing_time()
def current_watermark(self) -> int:
return self._underlying.current_watermark()
def window_state(self) -> KeyedStateStore:
return self._underlying.window_state()
def global_state(self) -> KeyedStateStore:
return self._underlying.global_state()
class InternalProcessAllWindowContext(ProcessAllWindowFunction.Context[W]):
def __init__(self):
self._underlying = None
self._window = None
def window(self) -> W:
return self._window
def window_state(self) -> KeyedStateStore:
return self._underlying.window_state()
def global_state(self) -> KeyedStateStore:
return self._underlying.global_state()
class InternalSingleValueProcessWindowFunction(InternalWindowFunction[IN, OUT, KEY, W]):
def __init__(self, wrapped_function: ProcessWindowFunction):
self._wrapped_function = wrapped_function
self._internal_context = \
InternalProcessWindowContext() # type: InternalProcessWindowContext
def open(self, runtime_context: RuntimeContext):
self._wrapped_function.open(runtime_context)
def close(self):
self._wrapped_function.close()
def process(self,
key: KEY,
window: W,
context: InternalWindowFunction.InternalWindowContext,
input_data: IN) -> Iterable[OUT]:
self._internal_context._window = window
self._internal_context._underlying = context
return self._wrapped_function.process(key, self._internal_context, [input_data])
def clear(self, window: W, context: InternalWindowFunction.InternalWindowContext):
self._internal_context._window = window
self._internal_context._underlying = context
self._wrapped_function.clear(self._internal_context)
class InternalSingleValueProcessAllWindowFunction(InternalWindowFunction[IN, OUT, int, W]):
def __init__(self, wrapped_function: ProcessAllWindowFunction):
self._wrapped_function = wrapped_function
self._internal_context = \
InternalProcessAllWindowContext() # type: InternalProcessAllWindowContext
def open(self, runtime_context: RuntimeContext):
self._wrapped_function.open(runtime_context)
def close(self):
self._wrapped_function.close()
def process(self,
key: int,
window: W,
context: InternalWindowFunction.InternalWindowContext,
input_data: IN) -> Iterable[OUT]:
self._internal_context._window = window
self._internal_context._underlying = context
return self._wrapped_function.process(self._internal_context, [input_data])
def clear(self, window: W, context: InternalWindowFunction.InternalWindowContext):
self._internal_context._window = window
self._internal_context._underlying = context
self._wrapped_function.clear(self._internal_context)
class InternalIterableProcessWindowFunction(InternalWindowFunction[Iterable[IN], OUT, KEY, W]):
def __init__(self, wrapped_function: ProcessWindowFunction):
self._wrapped_function = wrapped_function
self._internal_context = \
InternalProcessWindowContext() # type: InternalProcessWindowContext
def open(self, runtime_context: RuntimeContext):
self._wrapped_function.open(runtime_context)
def close(self):
self._wrapped_function.close()
def process(self,
key: KEY,
window: W,
context: InternalWindowFunction.InternalWindowContext,
input_data: Iterable[IN]) -> Iterable[OUT]:
self._internal_context._window = window
self._internal_context._underlying = context
return self._wrapped_function.process(key, self._internal_context, input_data)
def clear(self, window: W, context: InternalWindowFunction.InternalWindowContext):
self._internal_context._window = window
self._internal_context._underlying = context
self._wrapped_function.clear(self._internal_context)
class BaseBroadcastProcessFunction(Function):
"""
The base class containing the functionality available to all broadcast process functions. These
include :class:`BroadcastProcessFunction` and :class:`KeyedBroadcastProcessFunction`.
"""
class BaseContext(ABC):
"""
The base context available to all methods in a broadcast process function. This includes
:class:`BroadcastProcessFunction` and :class:`KeyedBroadcastProcessFunction`.
"""
@abstractmethod
def timestamp(self) -> int:
"""
Timestamp of the element currently being processed or timestamp of a firing timer.
This might be None, for example if the time characteristic of your program is
set to :attr:`TimeCharacteristic.ProcessingTime`.
"""
pass
@abstractmethod
def current_processing_time(self) -> int:
"""Returns the current processing time."""
pass
@abstractmethod
def current_watermark(self) -> int:
"""Returns the current watermark."""
pass
class Context(BaseContext):
"""
A :class:`BaseContext` available to the broadcasted stream side of a
:class:`BroadcastConnectedStream`.
Apart from the basic functionality of a :class:`BaseContext`, this also allows to get and
update the elements stored in the :class:`BroadcastState`. In other words, it gives read/
write access to the broadcast state.
"""
@abstractmethod
def get_broadcast_state(self, state_descriptor: MapStateDescriptor) -> BroadcastState:
"""
Fetches the :class:`BroadcastState` with the specified name.
:param state_descriptor: the :class:`MapStateDescriptor` of the state to be fetched.
:return: The required :class:`BroadcastState`.
"""
pass
class ReadOnlyContext(BaseContext):
"""
A :class:`BaseContext` available to the non-broadcasted stream side of a
:class:`BroadcastConnectedStream`.
Apart from the basic functionality of a :class:`BaseContext`, this also allows to get
read-only access to the elements stored in the broadcast state.
"""
@abstractmethod
def get_broadcast_state(
self, state_descriptor: MapStateDescriptor
) -> ReadOnlyBroadcastState:
"""
Fetches a read-only view of the broadcast state with the specified name.
:param state_descriptor: the :class:`MapStateDescriptor` of the state to be fetched.
:return: The required read-only view of the broadcast state.
"""
pass
class BroadcastProcessFunction(BaseBroadcastProcessFunction, Generic[IN1, IN2, OUT]):
"""
A function to be applied to a :class:`BroadcastConnectedStream` that connects
:class:`BroadcastStream`, i.e. a stream with broadcast state, with a non-keyed
:class:`DataStream`.
The stream with the broadcast state can be created using the :meth:`DataStream.broadcast`
method.
The user has to implement two methods:
* the :meth:`process_broadcast_element` which will be applied to each element in the broadcast
side
* the :meth:`process_element` which will be applied to the non-broadcasted side.
The :meth:`process_broadcast_element` takes a context as an argument (among others), which
allows it to read/write to the broadcast state, while the :meth:`process_element` has read-only
access to the broadcast state.
.. versionadded:: 1.16.0
"""
class Context(BaseBroadcastProcessFunction.Context, ABC):
"""
A :class:`BaseBroadcastProcessFunction.Context` available to the broadcast side of a
:class:`BroadcastConnectedStream`.
"""
pass
class ReadOnlyContext(BaseBroadcastProcessFunction.ReadOnlyContext, ABC):
"""
A :class:`BaseBroadcastProcessFunction.ReadOnlyContext` available to the non-keyed side of a
:class:`BroadcastConnectedStream` (if any).
"""
pass
@abstractmethod
def process_element(self, value: IN1, ctx: ReadOnlyContext):
"""
This method is called for each element in the (non-broadcast) :class:`DataStream`.
This function can output zero or more elements via :code:`yield` statement, and query the
current processing/event time. Finally, it has read-only access to the broadcast state. The
context is only valid during the invocation of this method, do not store it.
:param value: The stream element.
:param ctx: A :class:`BroadcastProcessFunction.ReadOnlyContext` that allows querying the
timestamp of the element, querying the current processing/event time and reading the
broadcast state. The context is only valid during the invocation of this method, do not
store it.
"""
pass
@abstractmethod
def process_broadcast_element(self, value: IN2, ctx: Context):
"""
This method is called for each element in the :class:`BroadcastStream`.
This function can output zero or more elements via :code:`yield` statement, query the
current processing/event time, and also query and update the internal
:class:`state.BroadcastState`. These can be done through the provided
:class:`BroadcastProcessFunction.Context`. The context is only valid during the invocation
of this method, do not store it.
:param value: The stream element.
:param ctx: A :class:`BroadcastProcessFunction.Context` that allows querying the timestamp
of the element, querying the current processing/event time and updating the broadcast
state. The context is only valid during the invocation of this method, do not store it.
"""
pass
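# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the upstream Flink source): a
# BroadcastProcessFunction that stores (rule_name, rule_payload) pairs from the
# broadcast side and enriches data elements keyed on their first field. The
# tuple layouts are illustrative assumptions, the broadcast state is assumed to
# expose MapState-style get()/put() accessors, and the MapStateDescriptor is
# expected to be created by the caller, e.g.
# MapStateDescriptor("rules", Types.STRING(), Types.STRING()).
# ---------------------------------------------------------------------------
class _ExampleRuleEnrichment(BroadcastProcessFunction):

    def __init__(self, rules_descriptor: MapStateDescriptor):
        self._rules_descriptor = rules_descriptor

    def process_element(self, value, ctx: 'BroadcastProcessFunction.ReadOnlyContext'):
        # read-only view of the broadcast state; get() is assumed to return
        # None for keys that have not been broadcast yet
        rules = ctx.get_broadcast_state(self._rules_descriptor)
        yield value, rules.get(value[0])

    def process_broadcast_element(self, value, ctx: 'BroadcastProcessFunction.Context'):
        # writable broadcast state on the broadcast side
        ctx.get_broadcast_state(self._rules_descriptor).put(value[0], value[1])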
class KeyedBroadcastProcessFunction(BaseBroadcastProcessFunction, Generic[KEY, IN1, IN2, OUT]):
"""
A function to be applied to a :class:`BroadcastConnectedStream` that connects
:class:`BroadcastStream`, i.e. a stream with broadcast state, with a :class:`KeyedStream`.
The stream with the broadcast state can be created using the :meth:`DataStream.broadcast`
method.
The user has to implement two methods:
* the :meth:`process_broadcast_element` which will be applied to each element in the broadcast
side
* the :meth:`process_element` which will be applied to the non-broadcasted/keyed side.
The :meth:`process_broadcast_element` takes a context as an argument (among others), which
allows it to read/write to the broadcast state, while the :meth:`process_element` has read-only
access to the broadcast state, but can read/write to the keyed state and register timers.
.. versionadded:: 1.16.0
"""
class Context(BaseBroadcastProcessFunction.Context, ABC):
"""
A :class:`BaseBroadcastProcessFunction.Context` available to the broadcast side of a
:class:`BroadcastConnectedStream`.
Currently, the function ``applyToKeyedState`` in Java is not supported in PyFlink.
"""
pass
class ReadOnlyContext(BaseBroadcastProcessFunction.ReadOnlyContext, ABC):
"""
A :class:`BaseBroadcastProcessFunction.ReadOnlyContext` available to the non-keyed side of a
:class:`BroadcastConnectedStream` (if any).
Apart from the basic functionality of a :class:`BaseBroadcastProcessFunction.Context`, this
also allows to get a read-only iterator over the elements stored in the broadcast state and
a :class:`TimerService` for querying time and registering timers.
"""
@abstractmethod
def timer_service(self) -> TimerService:
"""
A :class:`TimerService` for querying time and registering timers.
"""
pass
@abstractmethod
def get_current_key(self) -> KEY:
"""
Get key of the element being processed.
"""
pass
class OnTimerContext(ReadOnlyContext, ABC):
"""
Information available in an invocation of :meth:`KeyedBroadcastProcessFunction.on_timer`.
"""
@abstractmethod
def time_domain(self) -> TimeDomain:
"""
The :class:`TimeDomain` of the firing timer, i.e. if it is event or processing time
timer.
"""
pass
@abstractmethod
def get_current_key(self) -> KEY:
"""
Get the key of the firing timer.
"""
pass
@abstractmethod
def process_element(self, value: IN1, ctx: ReadOnlyContext):
"""
This method is called for each element in the (non-broadcast) :class:`KeyedStream`.
It can output zero or more elements via ``yield`` statement, query the current
processing/event time, and also query and update the local keyed state. In addition,
it can get a :class:`TimerService` for registering timers and querying the time. Finally, it
has read-only access to the broadcast state. The context is only valid during the invocation
of this method, do not store it.
:param value: The stream element.
:param ctx: A :class:`KeyedBroadcastProcessFunction.ReadOnlyContext` that allows querying
the timestamp of the element, querying the current processing/event time and iterating
the broadcast state with read-only access. The context is only valid during the
invocation of this method, do not store it.
"""
pass
@abstractmethod
def process_broadcast_element(self, value: IN2, ctx: Context):
"""
This method is called for each element in the :class:`BroadcastStream`.
It can output zero or more elements via ``yield`` statement, query the current
processing/event time, and also query and update the internal :class:`state.BroadcastState`.
Currently, ``applyToKeyedState`` is not supported in PyFlink. The context is only valid
during the invocation of this method, do not store it.
:param value: The stream element.
:param ctx: A :class:`KeyedBroadcastProcessFunction.Context` that allows querying the
timestamp of the element, querying the current processing/event time and updating the
broadcast state. The context is only valid during the invocation of this method, do not
store it.
"""
pass
def on_timer(self, timestamp: int, ctx: OnTimerContext):
"""
Called when a timer set using :class:`TimerService` fires.
:param timestamp: The timestamp of the firing timer.
:param ctx: An :class:`KeyedBroadcastProcessFunction.OnTimerContext` that allows querying
the timestamp of the firing timer, querying the current processing/event time, iterating
the broadcast state with read-only access, querying the :class:`TimeDomain` of the
firing timer and getting a :class:`TimerService` for registering timers and querying the
time. The context is only valid during the invocation of this method, do not store it.
"""
pass
class InternalIterableProcessAllWindowFunction(InternalWindowFunction[Iterable[IN], OUT, int, W]):
def __init__(self, wrapped_function: ProcessAllWindowFunction):
self._wrapped_function = wrapped_function
self._internal_context = \
InternalProcessAllWindowContext() # type: InternalProcessAllWindowContext
def open(self, runtime_context: RuntimeContext):
self._wrapped_function.open(runtime_context)
def close(self):
self._wrapped_function.close()
def process(self,
key: int,
window: W,
context: InternalWindowFunction.InternalWindowContext,
input_data: Iterable[IN]) -> Iterable[OUT]:
self._internal_context._window = window
self._internal_context._underlying = context
return self._wrapped_function.process(self._internal_context, input_data)
def clear(self, window: W, context: InternalWindowFunction.InternalWindowContext):
self._internal_context._window = window
self._internal_context._underlying = context
self._wrapped_function.clear(self._internal_context)
| 58,855 | 35.107975 | 100 | py |
| flink | flink-master/flink-python/pyflink/datastream/utils.py |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import ast
import datetime
import pickle
from abc import abstractmethod
from pyflink.common import Row, RowKind, Configuration
from pyflink.common.typeinfo import (RowTypeInfo, TupleTypeInfo, Types, BasicArrayTypeInfo,
PrimitiveArrayTypeInfo, MapTypeInfo, ListTypeInfo,
ObjectArrayTypeInfo, ExternalTypeInfo, TypeInformation)
from pyflink.java_gateway import get_gateway
class ResultTypeQueryable(object):
@abstractmethod
def get_produced_type(self) -> TypeInformation:
pass
def create_hadoop_configuration(config: Configuration):
jvm = get_gateway().jvm
hadoop_config = jvm.org.apache.hadoop.conf.Configuration()
for k, v in config.to_dict().items():
hadoop_config.set(k, v)
return hadoop_config
def create_java_properties(config: Configuration):
jvm = get_gateway().jvm
properties = jvm.java.util.Properties()
for k, v in config.to_dict().items():
properties.put(k, v)
return properties
def convert_to_python_obj(data, type_info):
if type_info == Types.PICKLED_BYTE_ARRAY():
return pickle.loads(data)
elif isinstance(type_info, ExternalTypeInfo):
return convert_to_python_obj(data, type_info._type_info)
else:
gateway = get_gateway()
pickled_bytes = gateway.jvm.PythonBridgeUtils. \
getPickledBytesFromJavaObject(data, type_info.get_java_type_info())
return pickled_bytes_to_python_obj(pickled_bytes, type_info)
def pickled_bytes_to_python_obj(data, type_info):
if isinstance(type_info, RowTypeInfo):
row_kind = RowKind(int.from_bytes(data[0], 'little'))
field_data_with_types = zip(list(data[1:]), type_info.get_field_types())
fields = []
for field_data, field_type in field_data_with_types:
if len(field_data) == 0:
fields.append(None)
else:
fields.append(pickled_bytes_to_python_obj(field_data, field_type))
row = Row.of_kind(row_kind, *fields)
row.set_field_names(type_info.get_field_names())
return row
elif isinstance(type_info, TupleTypeInfo):
field_data_with_types = zip(data, type_info.get_field_types())
fields = []
for field_data, field_type in field_data_with_types:
if len(field_data) == 0:
fields.append(None)
else:
fields.append(pickled_bytes_to_python_obj(field_data, field_type))
return tuple(fields)
else:
data = pickle.loads(data)
if type_info == Types.SQL_TIME():
seconds, microseconds = divmod(data, 10 ** 6)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
return datetime.time(hours, minutes, seconds, microseconds)
elif type_info == Types.SQL_DATE():
return type_info.from_internal_type(data)
elif type_info == Types.SQL_TIMESTAMP():
return type_info.from_internal_type(int(data.timestamp() * 10 ** 6))
elif type_info == Types.FLOAT():
return type_info.from_internal_type(ast.literal_eval(data))
elif isinstance(type_info,
(BasicArrayTypeInfo, PrimitiveArrayTypeInfo, ObjectArrayTypeInfo)):
element_type = type_info._element_type
elements = []
for element_bytes in data:
elements.append(pickled_bytes_to_python_obj(element_bytes, element_type))
return elements
elif isinstance(type_info, MapTypeInfo):
key_type = type_info._key_type_info
value_type = type_info._value_type_info
zip_kv = zip(data[0], data[1])
return dict((pickled_bytes_to_python_obj(k, key_type),
pickled_bytes_to_python_obj(v, value_type))
for k, v in zip_kv)
elif isinstance(type_info, ListTypeInfo):
element_type = type_info.elem_type
elements = []
for element_bytes in data:
elements.append(pickled_bytes_to_python_obj(element_bytes, element_type))
return elements
else:
return type_info.from_internal_type(data)
| 5,238 | 41.942623 | 92 | py |
| flink | flink-master/flink-python/pyflink/datastream/data_stream.py |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import typing
import uuid
from enum import Enum
from typing import Callable, Union, List, cast, Optional, overload
from pyflink.util.java_utils import get_j_env_configuration
from pyflink.common import typeinfo, ExecutionConfig, Row
from pyflink.common.typeinfo import RowTypeInfo, Types, TypeInformation, _from_java_type
from pyflink.common.watermark_strategy import WatermarkStrategy, TimestampAssigner
from pyflink.datastream.connectors import Sink
from pyflink.datastream.functions import (_get_python_env, FlatMapFunction, MapFunction, Function,
FunctionWrapper, SinkFunction, FilterFunction,
KeySelector, ReduceFunction, CoMapFunction,
CoFlatMapFunction, Partitioner, RuntimeContext,
ProcessFunction, KeyedProcessFunction,
KeyedCoProcessFunction, WindowFunction,
ProcessWindowFunction, InternalWindowFunction,
InternalIterableWindowFunction,
InternalIterableProcessWindowFunction, CoProcessFunction,
InternalSingleValueWindowFunction,
InternalSingleValueProcessWindowFunction,
PassThroughWindowFunction, AggregateFunction,
NullByteKeySelector, AllWindowFunction,
InternalIterableAllWindowFunction,
ProcessAllWindowFunction,
InternalIterableProcessAllWindowFunction,
BroadcastProcessFunction,
KeyedBroadcastProcessFunction,
InternalSingleValueAllWindowFunction,
PassThroughAllWindowFunction,
InternalSingleValueProcessAllWindowFunction)
from pyflink.datastream.output_tag import OutputTag
from pyflink.datastream.slot_sharing_group import SlotSharingGroup
from pyflink.datastream.state import (ListStateDescriptor, StateDescriptor, ReducingStateDescriptor,
AggregatingStateDescriptor, MapStateDescriptor, ReducingState)
from pyflink.datastream.utils import convert_to_python_obj
from pyflink.datastream.window import (CountTumblingWindowAssigner, CountSlidingWindowAssigner,
CountWindowSerializer, TimeWindowSerializer, Trigger,
WindowAssigner, WindowOperationDescriptor,
GlobalWindowSerializer, MergingWindowAssigner)
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import to_jarray
__all__ = ['CloseableIterator', 'DataStream', 'KeyedStream', 'ConnectedStreams', 'WindowedStream',
           'DataStreamSink', 'BroadcastStream', 'BroadcastConnectedStream']
WINDOW_STATE_NAME = 'window-contents'
class DataStream(object):
"""
A DataStream represents a stream of elements of the same type. A DataStream can be transformed
into another DataStream by applying a transformation as for example:
::
>>> DataStream.map(MapFunctionImpl())
>>> DataStream.filter(FilterFunctionImpl())
"""
def __init__(self, j_data_stream):
self._j_data_stream = j_data_stream
def get_name(self) -> str:
"""
Gets the name of the current data stream. This name is used by the visualization and logging
during runtime.
:return: Name of the stream.
"""
return self._j_data_stream.getName()
def name(self, name: str) -> 'DataStream':
"""
Sets the name of the current data stream. This name is used by the visualization and logging
during runtime.
:param name: Name of the stream.
:return: The named operator.
"""
self._j_data_stream.name(name)
return self
def uid(self, uid: str) -> 'DataStream':
"""
Sets an ID for this operator. The specified ID is used to assign the same operator ID across
job submissions (for example when starting a job from a savepoint).
Important: this ID needs to be unique per transformation and job. Otherwise, job submission
will fail.
:param uid: The unique user-specified ID of this transformation.
:return: The operator with the specified ID.
"""
self._j_data_stream.uid(uid)
return self
def set_uid_hash(self, uid_hash: str) -> 'DataStream':
"""
        Sets a user-provided hash for this operator. This will be used AS IS to create the
        JobVertexID. The user-provided hash is an alternative to the generated hashes, and is
        considered when identification of an operator through the default hash mechanics fails
        (e.g. because of changes between Flink versions).
        Important: this should be used as a workaround or for troubleshooting. The provided hash
        needs to be unique per transformation and job. Otherwise, job submission will fail.
        Furthermore, you cannot assign a user-specified hash to intermediate nodes in an operator
        chain, and trying to do so will make your job fail.
A use case for this is in migration between Flink versions or changing the jobs in a way
that changes the automatically generated hashes. In this case, providing the previous hashes
directly through this method (e.g. obtained from old logs) can help to reestablish a lost
mapping from states to their target operator.
:param uid_hash: The user provided hash for this operator. This will become the jobVertexID,
which is shown in the logs and web ui.
:return: The operator with the user provided hash.
"""
self._j_data_stream.setUidHash(uid_hash)
return self
def set_parallelism(self, parallelism: int) -> 'DataStream':
"""
Sets the parallelism for this operator.
:param parallelism: The parallelism for this operator.
:return: The operator with set parallelism.
"""
self._j_data_stream.setParallelism(parallelism)
return self
def set_max_parallelism(self, max_parallelism: int) -> 'DataStream':
"""
Sets the maximum parallelism of this operator.
The maximum parallelism specifies the upper bound for dynamic scaling. It also defines the
number of key groups used for partitioned state.
:param max_parallelism: Maximum parallelism.
:return: The operator with set maximum parallelism.
"""
self._j_data_stream.setMaxParallelism(max_parallelism)
return self
def get_type(self) -> TypeInformation:
"""
Gets the type of the stream.
:return: The type of the DataStream.
"""
return typeinfo._from_java_type(self._j_data_stream.getType())
def get_execution_environment(self):
"""
Returns the StreamExecutionEnvironment that was used to create this DataStream.
:return: The Execution Environment.
"""
from pyflink.datastream import StreamExecutionEnvironment
return StreamExecutionEnvironment(
j_stream_execution_environment=self._j_data_stream.getExecutionEnvironment())
def get_execution_config(self) -> ExecutionConfig:
return ExecutionConfig(j_execution_config=self._j_data_stream.getExecutionConfig())
def force_non_parallel(self) -> 'DataStream':
"""
Sets the parallelism and maximum parallelism of this operator to one, and marks this
operator so that it cannot be given a non-1 degree of parallelism.
:return: The operator with only one parallelism.
"""
self._j_data_stream.forceNonParallel()
return self
def set_buffer_timeout(self, timeout_millis: int) -> 'DataStream':
"""
Sets the buffering timeout for data produced by this operation. The timeout defines how long
data may linger in a partially full buffer before being sent over the network.
Lower timeouts lead to lower tail latencies, but may affect throughput. Timeouts of 1 ms
still sustain high throughput, even for jobs with high parallelism.
A value of '-1' means that the default buffer timeout should be used. A value of '0'
indicates that no buffering should happen, and all records/events should be immediately sent
through the network, without additional buffering.
:param timeout_millis: The maximum time between two output flushes.
:return: The operator with buffer timeout set.
"""
self._j_data_stream.setBufferTimeout(timeout_millis)
return self
def start_new_chain(self) -> 'DataStream':
"""
Starts a new task chain beginning at this operator. This operator will not be chained
(thread co-located for increased performance) to any previous tasks, even if possible.
:return: The operator with chaining set.
"""
self._j_data_stream.startNewChain()
return self
def disable_chaining(self) -> 'DataStream':
"""
Turns off chaining for this operator so thread co-location will not be used as an
optimization.
Chaining can be turned off for the whole job by
StreamExecutionEnvironment.disableOperatorChaining(), however it is not advised for
performance considerations.
:return: The operator with chaining disabled.
"""
self._j_data_stream.disableChaining()
return self
def slot_sharing_group(self, slot_sharing_group: Union[str, SlotSharingGroup]) -> 'DataStream':
"""
Sets the slot sharing group of this operation. Parallel instances of operations that are in
the same slot sharing group will be co-located in the same TaskManager slot, if possible.
Operations inherit the slot sharing group of input operations if all input operations are in
the same slot sharing group and no slot sharing group was explicitly specified.
Initially an operation is in the default slot sharing group. An operation can be put into
the default group explicitly by setting the slot sharing group to 'default'.
:param slot_sharing_group: The name of the slot sharing group, or a SlotSharingGroup which
contains the name and its resource spec.
:return: This operator.
"""
if isinstance(slot_sharing_group, SlotSharingGroup):
self._j_data_stream.slotSharingGroup(slot_sharing_group.get_java_slot_sharing_group())
else:
self._j_data_stream.slotSharingGroup(slot_sharing_group)
return self
def set_description(self, description: str) -> 'DataStream':
"""
Sets the description for this operator.
Description is used in json plan and web ui, but not in logging and metrics where only
name is available. Description is expected to provide detailed information about the
operator, while name is expected to be more simple, providing summary information only,
so that we can have more user-friendly logging messages and metric tags without losing
useful messages for debugging.
:param description: The description for this operator.
:return: The operator with new description.
.. versionadded:: 1.15.0
"""
self._j_data_stream.setDescription(description)
return self
def map(self, func: Union[Callable, MapFunction], output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a Map transformation on a DataStream. The transformation calls a MapFunction for
each element of the DataStream. Each MapFunction call returns exactly one element.
Note that if the user does not specify the output data type, the output data will be
serialized as a pickled primitive byte array.
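Example (illustrative sketch; ``env`` is assumed to be an existing StreamExecutionEnvironment
and the sample data is made up):
::
>>> ds = env.from_collection([1, 2, 3], type_info=Types.INT())
>>> mapped = ds.map(lambda x: x * 2, output_type=Types.INT())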
:param func: The MapFunction that is called for each element of the DataStream.
:param output_type: The type information of the MapFunction output data.
:return: The transformed DataStream.
"""
if not isinstance(func, MapFunction) and not callable(func):
raise TypeError("The input must be a MapFunction or a callable function")
class MapProcessFunctionAdapter(ProcessFunction):
def __init__(self, map_func):
if isinstance(map_func, MapFunction):
self._open_func = map_func.open
self._close_func = map_func.close
self._map_func = map_func.map
else:
self._open_func = None
self._close_func = None
self._map_func = map_func
def open(self, runtime_context: RuntimeContext):
if self._open_func:
self._open_func(runtime_context)
def close(self):
if self._close_func:
self._close_func()
def process_element(self, value, ctx: 'ProcessFunction.Context'):
yield self._map_func(value)
return self.process(MapProcessFunctionAdapter(func), output_type) \
.name("Map")
def flat_map(self,
func: Union[Callable, FlatMapFunction],
output_type: TypeInformation = None) -> 'DataStream':
"""
Applies a FlatMap transformation on a DataStream. The transformation calls a FlatMapFunction
for each element of the DataStream. Each FlatMapFunction call can return any number of
elements including none.
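Example (illustrative sketch; ``env`` and the sample sentences are assumptions):
::
>>> ds = env.from_collection(['hello world', 'hello flink'], type_info=Types.STRING())
>>> words = ds.flat_map(lambda line: line.split(' '), output_type=Types.STRING())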
:param func: The FlatMapFunction that is called for each element of the DataStream.
:param output_type: The type information of output data.
:return: The transformed DataStream.
"""
if not isinstance(func, FlatMapFunction) and not callable(func):
raise TypeError("The input must be a FlatMapFunction or a callable function")
class FlatMapProcessFunctionAdapter(ProcessFunction):
def __init__(self, flat_map_func):
if isinstance(flat_map_func, FlatMapFunction):
self._open_func = flat_map_func.open
self._close_func = flat_map_func.close
self._flat_map_func = flat_map_func.flat_map
else:
self._open_func = None
self._close_func = None
self._flat_map_func = flat_map_func
def open(self, runtime_context: RuntimeContext):
if self._open_func:
self._open_func(runtime_context)
def close(self):
if self._close_func:
self._close_func()
def process_element(self, value, ctx: 'ProcessFunction.Context'):
yield from self._flat_map_func(value)
return self.process(FlatMapProcessFunctionAdapter(func), output_type) \
.name("FlatMap")
def key_by(self,
key_selector: Union[Callable, KeySelector],
key_type: TypeInformation = None) -> 'KeyedStream':
"""
Creates a new KeyedStream that uses the provided key for partitioning its operator states.
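Example (illustrative sketch; ``env`` and the sample tuples are assumptions):
::
>>> ds = env.from_collection([('a', 1), ('b', 2), ('a', 3)])
>>> keyed = ds.key_by(lambda x: x[0], key_type=Types.STRING())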
:param key_selector: The KeySelector to be used for extracting the key for partitioning.
:param key_type: The type information describing the key type.
:return: The DataStream with partitioned state (i.e. KeyedStream).
"""
if not isinstance(key_selector, KeySelector) and not callable(key_selector):
raise TypeError("Parameter key_selector should be type of KeySelector or a callable "
"function.")
class AddKey(ProcessFunction):
def __init__(self, key_selector):
if isinstance(key_selector, KeySelector):
self._key_selector_open_func = key_selector.open
self._key_selector_close_func = key_selector.close
self._get_key_func = key_selector.get_key
else:
self._key_selector_open_func = None
self._key_selector_close_func = None
self._get_key_func = key_selector
def open(self, runtime_context: RuntimeContext):
if self._key_selector_open_func:
self._key_selector_open_func(runtime_context)
def close(self):
if self._key_selector_close_func:
self._key_selector_close_func()
def process_element(self, value, ctx: 'ProcessFunction.Context'):
yield Row(self._get_key_func(value), value)
output_type_info = typeinfo._from_java_type(
self._j_data_stream.getTransformation().getOutputType())
if key_type is None:
key_type = Types.PICKLED_BYTE_ARRAY()
gateway = get_gateway()
stream_with_key_info = self.process(
AddKey(key_selector),
output_type=Types.ROW([key_type, output_type_info]))
stream_with_key_info.name(gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
.STREAM_KEY_BY_MAP_OPERATOR_NAME)
JKeyByKeySelector = gateway.jvm.KeyByKeySelector
key_stream = KeyedStream(
stream_with_key_info._j_data_stream.keyBy(
JKeyByKeySelector(),
Types.ROW([key_type]).get_java_type_info()), output_type_info,
self)
return key_stream
def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
"""
Applies a Filter transformation on a DataStream. The transformation calls a FilterFunction
for each element of the DataStream and retains only those elements for which the function
returns true. Elements for which the function returns false are filtered out.
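Example (illustrative sketch; ``env`` and the sample numbers are assumptions):
::
>>> ds = env.from_collection([1, 2, 3, 4], type_info=Types.INT())
>>> evens = ds.filter(lambda x: x % 2 == 0)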
:param func: The FilterFunction that is called for each element of the DataStream.
:return: The filtered DataStream.
"""
if not isinstance(func, FilterFunction) and not callable(func):
raise TypeError("The input must be a FilterFunction or a callable function")
class FilterProcessFunctionAdapter(ProcessFunction):
def __init__(self, filter_func):
if isinstance(filter_func, FilterFunction):
self._open_func = filter_func.open
self._close_func = filter_func.close
self._filter_func = filter_func.filter
else:
self._open_func = None
self._close_func = None
self._filter_func = filter_func
def open(self, runtime_context: RuntimeContext):
if self._open_func:
self._open_func(runtime_context)
def close(self):
if self._close_func:
self._close_func()
def process_element(self, value, ctx: 'ProcessFunction.Context'):
if self._filter_func(value):
yield value
output_type = typeinfo._from_java_type(
self._j_data_stream.getTransformation().getOutputType())
return self.process(FilterProcessFunctionAdapter(func), output_type=output_type) \
.name("Filter")
def window_all(self, window_assigner: WindowAssigner) -> 'AllWindowedStream':
"""
Windows this data stream into an AllWindowedStream, which evaluates windows over a
non-key-grouped stream. Elements are put into windows by a WindowAssigner. The grouping of
elements is done by window.
A Trigger can be defined to specify when windows are evaluated. However, WindowAssigners
have a default Trigger that is used if a Trigger is not specified.
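Example (illustrative sketch; ``ds`` is an existing DataStream, and it is assumed that
TumblingProcessingTimeWindows is imported from pyflink.datastream.window and Time from
pyflink.common.time):
::
>>> windowed = ds.window_all(TumblingProcessingTimeWindows.of(Time.seconds(5)))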
:param window_assigner: The WindowAssigner that assigns elements to windows.
:return: The trigger windows data stream.
.. versionadded:: 1.16.0
"""
return AllWindowedStream(self, window_assigner)
def union(self, *streams: 'DataStream') -> 'DataStream':
"""
Creates a new DataStream by merging DataStream outputs of the same type with each other. The
DataStreams merged using this operator will be transformed simultaneously.
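Example (illustrative sketch; ``env`` is an assumed, pre-existing StreamExecutionEnvironment
and both streams carry the same element type):
::
>>> ds1 = env.from_collection([1, 2], type_info=Types.INT())
>>> ds2 = env.from_collection([3, 4], type_info=Types.INT())
>>> merged = ds1.union(ds2)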
:param streams: The DataStreams to union output with.
:return: The DataStream.
"""
j_data_streams = []
for data_stream in streams:
if isinstance(data_stream, KeyedStream):
j_data_streams.append(data_stream._values()._j_data_stream)
else:
j_data_streams.append(data_stream._j_data_stream)
gateway = get_gateway()
JDataStream = gateway.jvm.org.apache.flink.streaming.api.datastream.DataStream
j_data_stream_arr = get_gateway().new_array(JDataStream, len(j_data_streams))
for i in range(len(j_data_streams)):
j_data_stream_arr[i] = j_data_streams[i]
j_united_stream = self._j_data_stream.union(j_data_stream_arr)
return DataStream(j_data_stream=j_united_stream)
@overload
def connect(self, ds: 'DataStream') -> 'ConnectedStreams':
pass
@overload
def connect(self, ds: 'BroadcastStream') -> 'BroadcastConnectedStream':
pass
def connect(self, ds: Union['DataStream', 'BroadcastStream']) \
-> Union['ConnectedStreams', 'BroadcastConnectedStream']:
"""
If ds is a :class:`DataStream`, creates a new :class:`ConnectedStreams` by connecting
DataStream outputs of (possible) different types with each other. The DataStreams connected
using this operator can be used with CoFunctions to apply joint transformations.
If ds is a :class:`BroadcastStream`, creates a new :class:`BroadcastConnectedStream` by
connecting the current :class:`DataStream` with a :class:`BroadcastStream`. The latter can
be created using the :meth:`broadcast` method. The resulting stream can be further processed
using the :meth:`BroadcastConnectedStream.process` method.
:param ds: The DataStream or BroadcastStream with which this stream will be connected.
:return: The ConnectedStreams or BroadcastConnectedStream.
.. versionchanged:: 1.16.0
Support connect BroadcastStream
"""
if isinstance(ds, BroadcastStream):
return BroadcastConnectedStream(
self, ds, cast(BroadcastStream, ds).broadcast_state_descriptors
)
return ConnectedStreams(self, ds)
def shuffle(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are shuffled uniformly
randomly to the next operation.
:return: The DataStream with shuffle partitioning set.
"""
return DataStream(self._j_data_stream.shuffle())
def project(self, *field_indexes: int) -> 'DataStream':
"""
Initiates a Project transformation on a Tuple DataStream.
Note that only Tuple DataStreams can be projected.
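Example (illustrative sketch; ``env`` is assumed; keeps fields 0 and 2 of a Tuple stream and
drops field 1):
::
>>> ds = env.from_collection(
...     [(1, 'a', 0.5), (2, 'b', 1.5)],
...     type_info=Types.TUPLE([Types.INT(), Types.STRING(), Types.DOUBLE()]))
>>> projected = ds.project(0, 2)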
:param field_indexes: The field indexes of the input tuples that are retained. The order of
fields in the output tuple corresponds to the order of field indexes.
:return: The projected DataStream.
"""
if not isinstance(self.get_type(), typeinfo.TupleTypeInfo):
raise Exception('Only Tuple DataStreams can be projected.')
gateway = get_gateway()
j_index_arr = gateway.new_array(gateway.jvm.int, len(field_indexes))
for i in range(len(field_indexes)):
j_index_arr[i] = field_indexes[i]
return DataStream(self._j_data_stream.project(j_index_arr))
def rescale(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are distributed evenly
to a subset of instances of the next operation in a round-robin fashion.
The subset of downstream operations to which the upstream operation sends elements depends
on the degree of parallelism of both the upstream and downstream operation. For example, if
the upstream operation has parallelism 2 and the downstream operation has parallelism 4,
then one upstream operation would distribute elements to two downstream operations. If, on
the other hand, the downstream operation has parallelism 4 then two upstream operations will
distribute to one downstream operation while the other two upstream operations will
distribute to the other downstream operations.
In cases where the different parallelisms are not multiples of each one or several
downstream operations will have a differing number of inputs from upstream operations.
:return: The DataStream with rescale partitioning set.
"""
return DataStream(self._j_data_stream.rescale())
def rebalance(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are distributed evenly
to instances of the next operation in a round-robin fashion.
:return: The DataStream with rebalance partition set.
"""
return DataStream(self._j_data_stream.rebalance())
def forward(self) -> 'DataStream':
"""
Sets the partitioning of the DataStream so that the output elements are forwarded to the
local sub-task of the next operation.
:return: The DataStream with forward partitioning set.
"""
return DataStream(self._j_data_stream.forward())
@overload
def broadcast(self) -> 'DataStream':
pass
@overload
def broadcast(self, broadcast_state_descriptor: MapStateDescriptor,
*other_broadcast_state_descriptors: MapStateDescriptor) -> 'BroadcastStream':
pass
def broadcast(self, broadcast_state_descriptor: Optional[MapStateDescriptor] = None,
*other_broadcast_state_descriptors: MapStateDescriptor) \
-> Union['DataStream', 'BroadcastStream']:
"""
Sets the partitioning of the DataStream so that the output elements are broadcasted to every
parallel instance of the next operation.
If :class:`~state.MapStateDescriptor` s are passed in, it returns a
:class:`BroadcastStream` with :class:`~state.BroadcastState` s implicitly created as the
descriptors specified.
Example:
::
>>> map_state_desc1 = MapStateDescriptor("state1", Types.INT(), Types.INT())
>>> map_state_desc2 = MapStateDescriptor("state2", Types.INT(), Types.STRING())
>>> broadcast_stream = ds1.broadcast(map_state_desc1, map_state_desc2)
>>> broadcast_connected_stream = ds2.connect(broadcast_stream)
:param broadcast_state_descriptor: the first MapStateDescriptor describing BroadcastState.
:param other_broadcast_state_descriptors: the rest of MapStateDescriptors describing
BroadcastStates, if any.
:return: The DataStream with broadcast partitioning set or a BroadcastStream which can be
used in :meth:`connect` to create a BroadcastConnectedStream for further processing of
the elements.
.. versionchanged:: 1.16.0
Support return BroadcastStream
"""
if broadcast_state_descriptor is not None:
args = [broadcast_state_descriptor]
args.extend(other_broadcast_state_descriptors)
for arg in args:
if not isinstance(arg, MapStateDescriptor):
raise TypeError("broadcast_state_descriptor must be MapStateDescriptor")
broadcast_state_descriptors = [arg for arg in args] # type: List[MapStateDescriptor]
return BroadcastStream(cast(DataStream, self.broadcast()), broadcast_state_descriptors)
return DataStream(self._j_data_stream.broadcast())
def process(self, func: ProcessFunction, output_type: TypeInformation = None) -> 'DataStream':
"""
Applies the given ProcessFunction on the input stream, thereby creating a transformed output
stream.
The function will be called for every element in the input streams and can produce zero or
more output elements.
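Example (a minimal sketch; ``env``, the function name and the data are illustrative
assumptions):
::
>>> class Doubler(ProcessFunction):
...     def process_element(self, value, ctx):
...         yield value * 2
>>> ds = env.from_collection([1, 2, 3], type_info=Types.INT())
>>> processed = ds.process(Doubler(), output_type=Types.INT())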
:param func: The ProcessFunction that is called for each element in the stream.
:param output_type: TypeInformation for the result type of the function.
:return: The transformed DataStream.
"""
from pyflink.fn_execution import flink_fn_execution_pb2
j_python_data_stream_function_operator, j_output_type_info = \
_get_one_input_stream_operator(
self,
func,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.PROCESS, # type: ignore
output_type)
return DataStream(self._j_data_stream.transform(
"PROCESS",
j_output_type_info,
j_python_data_stream_function_operator))
def assign_timestamps_and_watermarks(self, watermark_strategy: WatermarkStrategy) -> \
'DataStream':
"""
Assigns timestamps to the elements in the data stream and generates watermarks to signal
event time progress. The given WatermarkStrategy is used to create a TimestampAssigner and
a WatermarkGenerator.
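Example (illustrative sketch; ``ds`` is an existing DataStream, each element is assumed to be
a (key, timestamp_millis) tuple, and TimestampAssigner is assumed to be imported from
pyflink.common.watermark_strategy):
::
>>> class TupleTimestampAssigner(TimestampAssigner):
...     def extract_timestamp(self, value, record_timestamp):
...         return value[1]
>>> strategy = (WatermarkStrategy.for_monotonous_timestamps()
...             .with_timestamp_assigner(TupleTimestampAssigner()))
>>> ds_with_ts = ds.assign_timestamps_and_watermarks(strategy)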
:param watermark_strategy: The strategy to generate watermarks based on event timestamps.
:return: The stream after the transformation, with assigned timestamps and watermarks.
"""
if watermark_strategy._timestamp_assigner is not None:
# in case users have specified custom TimestampAssigner, we need to extract and
# generate watermark according to the specified TimestampAssigner.
class TimestampAssignerProcessFunctionAdapter(ProcessFunction):
def __init__(self, timestamp_assigner: TimestampAssigner):
self._extract_timestamp_func = timestamp_assigner.extract_timestamp
def process_element(self, value, ctx: 'ProcessFunction.Context'):
yield value, self._extract_timestamp_func(value, ctx.timestamp())
# step 1: extract the timestamp according to the specified TimestampAssigner
timestamped_data_stream = self.process(
TimestampAssignerProcessFunctionAdapter(watermark_strategy._timestamp_assigner),
Types.TUPLE([self.get_type(), Types.LONG()]))
timestamped_data_stream.name("Extract-Timestamp")
# step 2: assign timestamp and watermark
gateway = get_gateway()
JCustomTimestampAssigner = gateway.jvm.org.apache.flink.streaming.api.functions.python \
.eventtime.CustomTimestampAssigner
j_watermarked_data_stream = (
timestamped_data_stream._j_data_stream.assignTimestampsAndWatermarks(
watermark_strategy._j_watermark_strategy.withTimestampAssigner(
JCustomTimestampAssigner())))
# step 3: remove the timestamp field which is added in step 1
JRemoveTimestampMapFunction = gateway.jvm.org.apache.flink.streaming.api.functions \
.python.eventtime.RemoveTimestampMapFunction
result = DataStream(j_watermarked_data_stream.map(
JRemoveTimestampMapFunction(), self._j_data_stream.getType()))
result.name("Remove-Timestamp")
return result
else:
# if user not specify a TimestampAssigner, then return directly assign the Java
# watermark strategy.
return DataStream(self._j_data_stream.assignTimestampsAndWatermarks(
watermark_strategy._j_watermark_strategy))
def partition_custom(self, partitioner: Union[Callable, Partitioner],
key_selector: Union[Callable, KeySelector]) -> 'DataStream':
"""
Partitions a DataStream on the key returned by the selector, using a custom partitioner.
This method takes the key selector to get the key to partition on, and a partitioner that
accepts the key type.
Note that this method works only on single field keys, i.e. the selector cannot return
tuples of fields.
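Example (illustrative sketch; ``env`` and the data are assumptions; the partitioner receives
the extracted key and the number of partitions and returns the target partition index):
::
>>> ds = env.from_collection([(0, 'a'), (1, 'b'), (2, 'c')])
>>> partitioned = ds.partition_custom(
...     lambda key, num_partitions: key % num_partitions,
...     lambda value: value[0])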
:param partitioner: The partitioner to assign partitions to keys.
:param key_selector: The KeySelector with which the DataStream is partitioned.
:return: The partitioned DataStream.
"""
if not isinstance(partitioner, Partitioner) and not callable(partitioner):
raise TypeError("Parameter partitioner should be type of Partitioner or a callable "
"function.")
if not isinstance(key_selector, KeySelector) and not callable(key_selector):
raise TypeError("Parameter key_selector should be type of KeySelector or a callable "
"function.")
gateway = get_gateway()
class CustomPartitioner(ProcessFunction):
"""
A wrapper class for the partition_custom map function. It marks the operation as a custom
partitioning so that the PythonPartitionCustomOperator is applied to run the map function.
"""
def __init__(self, partitioner, key_selector):
if isinstance(partitioner, Partitioner):
self._partitioner_open_func = partitioner.open
self._partitioner_close_func = partitioner.close
self._partition_func = partitioner.partition
else:
self._partitioner_open_func = None
self._partitioner_close_func = None
self._partition_func = partitioner
if isinstance(key_selector, KeySelector):
self._key_selector_open_func = key_selector.open
self._key_selector_close_func = key_selector.close
self._get_key_func = key_selector.get_key
else:
self._key_selector_open_func = None
self._key_selector_close_func = None
self._get_key_func = key_selector
def open(self, runtime_context: RuntimeContext):
if self._partitioner_open_func:
self._partitioner_open_func(runtime_context)
if self._key_selector_open_func:
self._key_selector_open_func(runtime_context)
self.num_partitions = int(runtime_context.get_job_parameter(
"NUM_PARTITIONS", "-1"))
if self.num_partitions <= 0:
raise ValueError(
"The partition number should be a positive value, got %s"
% self.num_partitions)
def close(self):
if self._partitioner_close_func:
self._partitioner_close_func()
if self._key_selector_close_func:
self._key_selector_close_func()
def process_element(self, value, ctx: 'ProcessFunction.Context'):
partition = self._partition_func(self._get_key_func(value), self.num_partitions)
yield Row(partition, value)
original_type_info = self.get_type()
stream_with_partition_info = self.process(
CustomPartitioner(partitioner, key_selector),
output_type=Types.ROW([Types.INT(), original_type_info]))
stream_with_partition_info.name(
gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
.STREAM_PARTITION_CUSTOM_MAP_OPERATOR_NAME)
JPartitionCustomKeySelector = gateway.jvm.PartitionCustomKeySelector
JIdParitioner = gateway.jvm.org.apache.flink.api.java.functions.IdPartitioner
partitioned_stream_with_partition_info = DataStream(
stream_with_partition_info._j_data_stream.partitionCustom(
JIdParitioner(), JPartitionCustomKeySelector()))
partitioned_stream = partitioned_stream_with_partition_info.map(
lambda x: x[1], original_type_info)
partitioned_stream.name(gateway.jvm.org.apache.flink.python.util.PythonConfigUtil
.KEYED_STREAM_VALUE_OPERATOR_NAME)
return DataStream(partitioned_stream._j_data_stream)
def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
"""
Adds the given sink to this DataStream. Only streams with sinks added will be executed once
the StreamExecutionEnvironment.execute() method is called.
:param sink_func: The SinkFunction object.
:return: The closed DataStream.
"""
return DataStreamSink(self._j_data_stream.addSink(sink_func.get_java_function()))
def sink_to(self, sink: Sink) -> 'DataStreamSink':
"""
Adds the given sink to this DataStream. Only streams with sinks added will be
executed once the
:func:`~pyflink.datastream.stream_execution_environment.StreamExecutionEnvironment.execute`
method is called.
:param sink: The user defined sink.
:return: The closed DataStream.
"""
ds = self
from pyflink.datastream.connectors.base import SupportsPreprocessing
if isinstance(sink, SupportsPreprocessing) and sink.get_transformer() is not None:
ds = sink.get_transformer().apply(self)
return DataStreamSink(ds._j_data_stream.sinkTo(sink.get_java_function()))
def execute_and_collect(self, job_execution_name: str = None, limit: int = None) \
-> Union['CloseableIterator', list]:
"""
Triggers the distributed execution of the streaming dataflow and returns an iterator over
the elements of the given DataStream.
The DataStream application is executed in the regular distributed manner on the target
environment, and the events from the stream are polled back to this application process and
thread through Flink's REST API.
The returned iterator must be closed to free all cluster resources.
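Example (illustrative sketch; ``env`` is assumed; passing ``limit`` returns a plain list, so
no explicit close is needed):
::
>>> ds = env.from_collection([1, 2, 3], type_info=Types.INT())
>>> results = ds.execute_and_collect(limit=3)
>>> print(results)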
:param job_execution_name: The name of the job execution.
:param limit: The limit for the collected elements.
"""
JPythonConfigUtil = get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
JPythonConfigUtil.configPythonOperator(self._j_data_stream.getExecutionEnvironment())
self._apply_chaining_optimization()
if job_execution_name is None and limit is None:
return CloseableIterator(self._j_data_stream.executeAndCollect(), self.get_type())
elif job_execution_name is not None and limit is None:
return CloseableIterator(self._j_data_stream.executeAndCollect(job_execution_name),
self.get_type())
if job_execution_name is None and limit is not None:
return list(map(lambda data: convert_to_python_obj(data, self.get_type()),
self._j_data_stream.executeAndCollect(limit)))
else:
return list(map(lambda data: convert_to_python_obj(data, self.get_type()),
self._j_data_stream.executeAndCollect(job_execution_name, limit)))
def print(self, sink_identifier: str = None) -> 'DataStreamSink':
"""
Writes a DataStream to the standard output stream (stdout).
For each element of the DataStream the object string is written.
NOTE: This will print to stdout on the machine where the code is executed, i.e. the Flink
worker, and is not fault tolerant.
:param sink_identifier: The string to prefix the output with.
:return: The closed DataStream.
"""
if sink_identifier is not None:
j_data_stream_sink = self._align_output_type()._j_data_stream.print(sink_identifier)
else:
j_data_stream_sink = self._align_output_type()._j_data_stream.print()
return DataStreamSink(j_data_stream_sink)
def get_side_output(self, output_tag: OutputTag) -> 'DataStream':
"""
Gets the :class:`DataStream` that contains the elements that are emitted from an operation
into the side output with the given :class:`OutputTag`.
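Example (illustrative sketch; ``ds``, the tag name and the function are assumptions, and the
side output is emitted from the process function by yielding ``(tag, value)``):
::
>>> tag = OutputTag("side-output", Types.INT())
>>> class Splitter(ProcessFunction):
...     def process_element(self, value, ctx):
...         yield value
...         yield tag, value
>>> main = ds.process(Splitter(), output_type=Types.INT())
>>> side = main.get_side_output(tag)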
:param output_tag: output tag for the side stream
:return: The DataStream with specified output tag
.. versionadded:: 1.16.0
"""
ds = DataStream(self._j_data_stream.getSideOutput(output_tag.get_java_output_tag()))
return ds.map(lambda i: i, output_type=output_tag.type_info)
def cache(self) -> 'CachedDataStream':
"""
Caches the intermediate result of the transformation. Only bounded streams are supported,
and currently only block mode is supported. The cache is generated lazily the first time
the intermediate result is computed. The cache will be cleared when the
StreamExecutionEnvironment is closed.
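Example (illustrative sketch; ``ds`` and ``env`` are assumed; the cached stream can be reused
by later jobs on the same environment and invalidated when no longer needed):
::
>>> cached = ds.cache()
>>> cached.print()
>>> env.execute()
>>> cached.invalidate()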
:return: The cached DataStream that can be used in a later job to reuse the cached
intermediate result.
.. versionadded:: 1.16.0
"""
return CachedDataStream(self._j_data_stream.cache())
def _apply_chaining_optimization(self):
"""
Chain the Python operators if possible.
"""
gateway = get_gateway()
JPythonOperatorChainingOptimizer = gateway.jvm.org.apache.flink.python.chain. \
PythonOperatorChainingOptimizer
j_transformation = JPythonOperatorChainingOptimizer.apply(
self._j_data_stream.getExecutionEnvironment(),
self._j_data_stream.getTransformation())
self._j_data_stream = gateway.jvm.org.apache.flink.streaming.api.datastream.DataStream(
self._j_data_stream.getExecutionEnvironment(), j_transformation)
def _align_output_type(self) -> 'DataStream':
"""
Transform the pickled python object into String if the output type is PickledByteArrayInfo.
"""
from py4j.java_gateway import get_java_class
gateway = get_gateway()
ExternalTypeInfo_CLASS = get_java_class(
gateway.jvm.org.apache.flink.table.runtime.typeutils.ExternalTypeInfo)
RowTypeInfo_CLASS = get_java_class(
gateway.jvm.org.apache.flink.api.java.typeutils.RowTypeInfo)
output_type_info_class = self._j_data_stream.getTransformation().getOutputType().getClass()
if output_type_info_class.isAssignableFrom(
Types.PICKLED_BYTE_ARRAY().get_java_type_info()
.getClass()):
def python_obj_to_str_map_func(value):
if not isinstance(value, (str, bytes)):
value = str(value)
return value
transformed_data_stream = DataStream(
self.map(python_obj_to_str_map_func,
output_type=Types.STRING())._j_data_stream)
return transformed_data_stream
elif (output_type_info_class.isAssignableFrom(ExternalTypeInfo_CLASS) or
output_type_info_class.isAssignableFrom(RowTypeInfo_CLASS)):
def python_obj_to_str_map_func(value):
assert isinstance(value, Row)
return '{}[{}]'.format(value.get_row_kind(),
','.join([str(item) for item in value._values]))
transformed_data_stream = DataStream(
self.map(python_obj_to_str_map_func,
output_type=Types.STRING())._j_data_stream)
return transformed_data_stream
else:
return self
class DataStreamSink(object):
"""
A Stream Sink. This is used for emitting elements from a streaming topology.
"""
def __init__(self, j_data_stream_sink):
"""
The constructor of DataStreamSink.
:param j_data_stream_sink: A DataStreamSink java object.
"""
self._j_data_stream_sink = j_data_stream_sink
def name(self, name: str) -> 'DataStreamSink':
"""
Sets the name of this sink. This name is used by the visualization and logging during
runtime.
:param name: The name of this sink.
:return: The named sink.
"""
self._j_data_stream_sink.name(name)
return self
def uid(self, uid: str) -> 'DataStreamSink':
"""
Sets an ID for this operator. The specified ID is used to assign the same operator ID across
job submissions (for example when starting a job from a savepoint).
Important: this ID needs to be unique per transformation and job. Otherwise, job submission
will fail.
:param uid: The unique user-specified ID of this transformation.
:return: The operator with the specified ID.
"""
self._j_data_stream_sink.uid(uid)
return self
def set_uid_hash(self, uid_hash: str) -> 'DataStreamSink':
"""
Sets a user-provided hash for this operator. This will be used AS IS to create the
JobVertexID. The user-provided hash is an alternative to the generated hashes and is
considered when identifying an operator through the default hash mechanics fails (e.g.
because of changes between Flink versions).
Important: this should be used as a workaround or for troubleshooting. The provided hash
needs to be unique per transformation and job. Otherwise, job submission will fail.
Furthermore, you cannot assign a user-specified hash to intermediate nodes in an operator
chain, and trying to do so will make your job fail.
A use case for this is in migration between Flink versions or changing the jobs in a way
that changes the automatically generated hashes. In this case, providing the previous hashes
directly through this method (e.g. obtained from old logs) can help to reestablish a lost
mapping from states to their target operator.
:param uid_hash: The user provided hash for this operator. This will become the jobVertexID,
which is shown in the logs and web ui.
:return: The operator with the user provided hash.
"""
self._j_data_stream_sink.setUidHash(uid_hash)
return self
def set_parallelism(self, parallelism: int) -> 'DataStreamSink':
"""
Sets the parallelism for this operator.
:param parallelism: The parallelism for this operator.
:return: The operator with set parallelism.
"""
self._j_data_stream_sink.setParallelism(parallelism)
return self
def set_description(self, description: str) -> 'DataStreamSink':
"""
Sets the description for this sink.
Description is used in json plan and web ui, but not in logging and metrics where only
name is available. Description is expected to provide detailed information about the sink,
while name is expected to be more simple, providing summary information only, so that we can
have more user-friendly logging messages and metric tags without losing useful messages for
debugging.
:param description: The description for this sink.
:return: The sink with new description.
.. versionadded:: 1.15.0
"""
self._j_data_stream_sink.setDescription(description)
return self
def disable_chaining(self) -> 'DataStreamSink':
"""
Turns off chaining for this operator so thread co-location will not be used as an
optimization.
Chaining can be turned off for the whole job by
StreamExecutionEnvironment.disableOperatorChaining(), however it is not advised for
performance considerations.
:return: The operator with chaining disabled.
"""
self._j_data_stream_sink.disableChaining()
return self
def slot_sharing_group(self, slot_sharing_group: Union[str, SlotSharingGroup]) \
-> 'DataStreamSink':
"""
Sets the slot sharing group of this operation. Parallel instances of operations that are in
the same slot sharing group will be co-located in the same TaskManager slot, if possible.
Operations inherit the slot sharing group of input operations if all input operations are in
the same slot sharing group and no slot sharing group was explicitly specified.
Initially an operation is in the default slot sharing group. An operation can be put into
the default group explicitly by setting the slot sharing group to 'default'.
:param slot_sharing_group: The name of the slot sharing group, or a SlotSharingGroup which
contains the name and its resource spec.
:return: This operator.
"""
if isinstance(slot_sharing_group, SlotSharingGroup):
self._j_data_stream_sink.slotSharingGroup(
slot_sharing_group.get_java_slot_sharing_group())
else:
self._j_data_stream_sink.slotSharingGroup(slot_sharing_group)
return self
class KeyedStream(DataStream):
"""
A KeyedStream represents a DataStream on which operator state is partitioned by key using a
provided KeySelector. Typical operations supported by a DataStream are also possible on a
KeyedStream, with the exception of partitioning methods such as shuffle, forward and keyBy.
Reduce-style operations, such as reduce and sum work on elements that have the same key.
"""
def __init__(self, j_keyed_stream, original_data_type_info, origin_stream: DataStream):
"""
Constructor of KeyedStream.
:param j_keyed_stream: A java KeyedStream object.
:param original_data_type_info: Original data typeinfo.
:param origin_stream: The DataStream before key by.
"""
super(KeyedStream, self).__init__(j_data_stream=j_keyed_stream)
self._original_data_type_info = original_data_type_info
self._origin_stream = origin_stream
def map(self, func: Union[Callable, MapFunction], output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a Map transformation on a KeyedStream. The transformation calls a MapFunction for
each element of the DataStream. Each MapFunction call returns exactly one element.
Note that if the user does not specify the output data type, the output data will be
serialized as a pickled primitive byte array.
:param func: The MapFunction that is called for each element of the DataStream.
:param output_type: The type information of the MapFunction output data.
:return: The transformed DataStream.
"""
if not isinstance(func, MapFunction) and not callable(func):
raise TypeError("The input must be a MapFunction or a callable function")
class MapKeyedProcessFunctionAdapter(KeyedProcessFunction):
def __init__(self, map_func):
if isinstance(map_func, MapFunction):
self._open_func = map_func.open
self._close_func = map_func.close
self._map_func = map_func.map
else:
self._open_func = None
self._close_func = None
self._map_func = map_func
def open(self, runtime_context: RuntimeContext):
if self._open_func:
self._open_func(runtime_context)
def close(self):
if self._close_func:
self._close_func()
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
yield self._map_func(value)
return self.process(MapKeyedProcessFunctionAdapter(func), output_type) \
.name("Map") # type: ignore
def flat_map(self,
func: Union[Callable, FlatMapFunction],
output_type: TypeInformation = None) -> 'DataStream':
"""
Applies a FlatMap transformation on a KeyedStream. The transformation calls a
FlatMapFunction for each element of the DataStream. Each FlatMapFunction call can return
any number of elements including none.
:param func: The FlatMapFunction that is called for each element of the DataStream.
:param output_type: The type information of output data.
:return: The transformed DataStream.
"""
if not isinstance(func, FlatMapFunction) and not callable(func):
raise TypeError("The input must be a FlatMapFunction or a callable function")
class FlatMapKeyedProcessFunctionAdapter(KeyedProcessFunction):
def __init__(self, flat_map_func):
if isinstance(flat_map_func, FlatMapFunction):
self._open_func = flat_map_func.open
self._close_func = flat_map_func.close
self._flat_map_func = flat_map_func.flat_map
else:
self._open_func = None
self._close_func = None
self._flat_map_func = flat_map_func
def open(self, runtime_context: RuntimeContext):
if self._open_func:
self._open_func(runtime_context)
def close(self):
if self._close_func:
self._close_func()
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
yield from self._flat_map_func(value)
return self.process(FlatMapKeyedProcessFunctionAdapter(func), output_type) \
.name("FlatMap")
def reduce(self, func: Union[Callable, ReduceFunction]) -> 'DataStream':
"""
Applies a reduce transformation on the grouped data stream grouped by the given
key position. The `ReduceFunction` will receive input values based on the key value.
Only input values with the same key will go to the same reducer.
Example:
::
>>> ds = env.from_collection([(1, 'a'), (2, 'a'), (3, 'a'), (4, 'b')])
>>> ds.key_by(lambda x: x[1]).reduce(lambda a, b: (a[0] + b[0], b[1]))
:param func: The ReduceFunction that is called for each element of the DataStream.
:return: The transformed DataStream.
"""
if not isinstance(func, ReduceFunction) and not callable(func):
raise TypeError("The input must be a ReduceFunction or a callable function")
output_type = _from_java_type(self._original_data_type_info.get_java_type_info())
gateway = get_gateway()
j_conf = get_j_env_configuration(self._j_data_stream.getExecutionEnvironment())
python_execution_mode = (
j_conf.getString(
gateway.jvm.org.apache.flink.python.PythonOptions.PYTHON_EXECUTION_MODE))
class ReduceProcessKeyedProcessFunctionAdapter(KeyedProcessFunction):
def __init__(self, reduce_function):
if isinstance(reduce_function, ReduceFunction):
self._open_func = reduce_function.open
self._close_func = reduce_function.close
self._reduce_function = reduce_function.reduce
else:
self._open_func = None
self._close_func = None
self._reduce_function = reduce_function
self._reduce_state = None # type: ReducingState
self._in_batch_execution_mode = True
def open(self, runtime_context: RuntimeContext):
if self._open_func:
self._open_func(runtime_context)
self._reduce_state = runtime_context.get_reducing_state(
ReducingStateDescriptor(
"_reduce_state" + str(uuid.uuid4()),
self._reduce_function,
output_type))
if python_execution_mode == "process":
from pyflink.fn_execution.datastream.process.runtime_context import (
StreamingRuntimeContext)
self._in_batch_execution_mode = (
cast(StreamingRuntimeContext, runtime_context)._in_batch_execution_mode)
else:
self._in_batch_execution_mode = runtime_context.get_job_parameter(
"inBatchExecutionMode", "false") == "true"
def close(self):
if self._close_func:
self._close_func()
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
if self._in_batch_execution_mode:
reduce_value = self._reduce_state.get()
if reduce_value is None:
# register a timer for emitting the result at the end when this is the
# first input for this key
ctx.timer_service().register_event_time_timer(0x7fffffffffffffff)
self._reduce_state.add(value)
else:
self._reduce_state.add(value)
# only emitting the result when all the data for a key is received
yield self._reduce_state.get()
def on_timer(self, timestamp: int, ctx: 'KeyedProcessFunction.OnTimerContext'):
current_value = self._reduce_state.get()
if current_value is not None:
yield current_value
return self.process(ReduceProcessKeyedProcessFunctionAdapter(func), output_type) \
.name("Reduce")
def filter(self, func: Union[Callable, FilterFunction]) -> 'DataStream':
if not isinstance(func, FilterFunction) and not callable(func):
raise TypeError("The input must be a FilterFunction or a callable function")
class FilterKeyedProcessFunctionAdapter(KeyedProcessFunction):
def __init__(self, filter_func):
if isinstance(filter_func, FilterFunction):
self._open_func = filter_func.open
self._close_func = filter_func.close
self._filter_func = filter_func.filter
else:
self._open_func = None
self._close_func = None
self._filter_func = filter_func
def open(self, runtime_context: RuntimeContext):
if self._open_func:
self._open_func(runtime_context)
def close(self):
if self._close_func:
self._close_func()
def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
if self._filter_func(value):
yield value
return self.process(FilterKeyedProcessFunctionAdapter(func), self._original_data_type_info)\
.name("Filter")
class AccumulateType(Enum):
MIN = 1
MAX = 2
MIN_BY = 3
MAX_BY = 4
SUM = 5
def _accumulate(self, position: Union[int, str], acc_type: AccumulateType):
"""
The base method is used for operators such as min, max, min_by, max_by, sum.
"""
if not isinstance(position, int) and not isinstance(position, str):
raise TypeError("The field position must be of int or str type to locate the value to "
"calculate for min, max, min_by, max_by and sum."
"The given type is: %s" % type(position))
class AccumulateReduceFunction(ReduceFunction):
def __init__(self, position, agg_type):
self._pos = position
self._agg_type = agg_type
self._reduce_func = None
def reduce(self, value1, value2):
def init_reduce_func(value_to_check):
if acc_type == KeyedStream.AccumulateType.MIN_BY:
# Logic for min_by operator.
def reduce_func(v1, v2):
if isinstance(value_to_check, (tuple, list, Row)):
return v2 if v2[self._pos] < v1[self._pos] else v1
else:
return v2 if v2 < v1 else v1
self._reduce_func = reduce_func
elif acc_type == KeyedStream.AccumulateType.MAX_BY:
# Logic for max_by operator.
def reduce_func(v1, v2):
if isinstance(value_to_check, (tuple, list, Row)):
return v2 if v2[self._pos] > v1[self._pos] else v1
else:
return v2 if v2 > v1 else v1
self._reduce_func = reduce_func
# for MIN / MAX / SUM
elif isinstance(value_to_check, tuple):
def reduce_func(v1, v2):
v1_list = list(v1)
if acc_type == KeyedStream.AccumulateType.MIN:
# Logic for min operator with tuple type input.
v1_list[self._pos] = v2[self._pos] \
if v2[self._pos] < v1[self._pos] else v1[self._pos]
elif acc_type == KeyedStream.AccumulateType.MAX:
# Logic for max operator with tuple type input.
v1_list[self._pos] = v2[self._pos] \
if v2[self._pos] > v1[self._pos] else v1[self._pos]
else:
# Logic for sum operator with tuple type input.
v1_list[self._pos] = v1[self._pos] + v2[self._pos]
return tuple(v1_list)
return tuple(v1_list)
self._reduce_func = reduce_func
elif isinstance(value_to_check, (list, Row)):
def reduce_func(v1, v2):
if acc_type == KeyedStream.AccumulateType.MIN:
# Logic for min operator with List and Row types input.
v1[self._pos] = v2[self._pos] \
if v2[self._pos] < v1[self._pos] else v1[self._pos]
elif acc_type == KeyedStream.AccumulateType.MAX:
# Logic for max operator with List and Row types input.
v1[self._pos] = v2[self._pos] \
if v2[self._pos] > v1[self._pos] else v1[self._pos]
else:
# Logic for sum operator with List and Row types input.
v1[self._pos] = v1[self._pos] + v2[self._pos]
return v1
self._reduce_func = reduce_func
else:
if self._pos != 0:
raise TypeError(
"The %s field selected on a basic type. A field expression "
"on a basic type can only select the 0th field (which means "
"selecting the entire basic type)." % self._pos)
def reduce_func(v1, v2):
if acc_type == KeyedStream.AccumulateType.MIN:
# Logic for min operator with basic type input.
return v2 if v2 < v1 else v1
elif acc_type == KeyedStream.AccumulateType.MAX:
# Logic for max operator with basic type input.
return v2 if v2 > v1 else v1
else:
# Logic for sum operator with basic type input.
return v1 + v2
self._reduce_func = reduce_func
if not self._reduce_func:
init_reduce_func(value2)
return self._reduce_func(value1, value2)
return self.reduce(AccumulateReduceFunction(position, acc_type))
def sum(self, position_to_sum: Union[int, str] = 0) -> 'DataStream':
"""
Applies an aggregation that gives a rolling sum of the data stream at the given position
grouped by the given key. An independent aggregate is kept per key.
Example(Tuple data to sum):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('b', 1), ('b', 5)])
>>> ds.key_by(lambda x: x[0]).sum(1)
Example(Row data to sum):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW([Types.STRING(), Types.INT()]))
>>> ds.key_by(lambda x: x[0]).sum(1)
Example(Row data with fields name to sum):
::
>>> ds = env.from_collection(
... [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW_NAMED(["key", "value"], [Types.STRING(), Types.INT()])
... )
>>> ds.key_by(lambda x: x[0]).sum("value")
:param position_to_sum: The field position in the data points to sum, type can be int which
indicates the index of the column to operate on or str which
indicates the name of the column to operate on.
:return: The transformed DataStream.
.. versionadded:: 1.16.0
"""
return self._accumulate(position_to_sum, KeyedStream.AccumulateType.SUM)
def min(self, position_to_min: Union[int, str] = 0) -> 'DataStream':
"""
Applies an aggregation that gives the current minimum of the data stream at the given
position by the given key. An independent aggregate is kept per key.
Example(Tuple data):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('b', 1), ('b', 5)])
>>> ds.key_by(lambda x: x[0]).min(1)
Example(Row data):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW([Types.STRING(), Types.INT()]))
>>> ds.key_by(lambda x: x[0]).min(1)
Example(Row data with fields name):
::
>>> ds = env.from_collection(
... [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW_NAMED(["key", "value"], [Types.STRING(), Types.INT()])
... )
>>> ds.key_by(lambda x: x[0]).min("value")
:param position_to_min: The field position in the data points to minimize. The type can be
int (field position) or str (field name). This is applicable to
Tuple types, List types, Row types, and basic types (which is
considered as having one field).
:return: The transformed DataStream.
.. versionadded:: 1.16.0
"""
return self._accumulate(position_to_min, KeyedStream.AccumulateType.MIN)
def max(self, position_to_max: Union[int, str] = 0) -> 'DataStream':
"""
Applies an aggregation that gives the current maximum of the data stream at the given
position by the given key. An independent aggregate is kept per key.
Example(Tuple data):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('b', 1), ('b', 5)])
>>> ds.key_by(lambda x: x[0]).max(1)
Example(Row data):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW([Types.STRING(), Types.INT()]))
>>> ds.key_by(lambda x: x[0]).max(1)
Example(Row data with fields name):
::
>>> ds = env.from_collection(
... [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW_NAMED(["key", "value"], [Types.STRING(), Types.INT()])
... )
>>> ds.key_by(lambda x: x[0]).max("value")
:param position_to_max: The field position in the data points to maximize. The type can be
int (field position) or str (field name). This is applicable to
Tuple types, List types, Row types, and basic types (which is
considered as having one field).
:return: The transformed DataStream.
.. versionadded:: 1.16.0
"""
return self._accumulate(position_to_max, KeyedStream.AccumulateType.MAX)
def min_by(self, position_to_min_by: Union[int, str] = 0) -> 'DataStream':
"""
Applies an aggregation that gives the current element with the minimum value at the
given position by the given key. An independent aggregate is kept per key.
If more elements have the minimum value at the given position,
the operator returns the first one by default.
Example(Tuple data):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('b', 1), ('b', 5)])
>>> ds.key_by(lambda x: x[0]).min_by(1)
Example(Row data):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW([Types.STRING(), Types.INT()]))
>>> ds.key_by(lambda x: x[0]).min_by(1)
Example(Row data with fields name):
::
>>> ds = env.from_collection(
... [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW_NAMED(["key", "value"], [Types.STRING(), Types.INT()])
... )
>>> ds.key_by(lambda x: x[0]).min_by("value")
:param position_to_min_by: The field position in the data points to minimize. The type can
be int (field position) or str (field name). This is applicable
to Tuple types, List types, Row types, and basic types (which is
considered as having one field).
:return: The transformed DataStream.
.. versionadded:: 1.16.0
"""
return self._accumulate(position_to_min_by, KeyedStream.AccumulateType.MIN_BY)
def max_by(self, position_to_max_by: Union[int, str] = 0) -> 'DataStream':
"""
Applies an aggregation that gives the current element with the maximum value at the
given position by the given key. An independent aggregate is kept per key.
If more elements have the maximum value at the given position,
the operator returns the first one by default.
Example(Tuple data):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('b', 1), ('b', 5)])
>>> ds.key_by(lambda x: x[0]).max_by(1)
Example(Row data):
::
>>> ds = env.from_collection([('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW([Types.STRING(), Types.INT()]))
>>> ds.key_by(lambda x: x[0]).max_by(1)
Example(Row data with fields name):
::
>>> ds = env.from_collection(
... [('a', 1), ('a', 2), ('a', 3), ('b', 1), ('b', 2)],
... type_info=Types.ROW_NAMED(["key", "value"], [Types.STRING(), Types.INT()])
... )
>>> ds.key_by(lambda x: x[0]).max_by("value")
:param position_to_max_by: The field position in the data points to maximize. The type can
be int (field position) or str (field name). This is applicable
to Tuple types, List types, Row types, and basic types (which is
considered as having one field).
:return: The transformed DataStream.
.. versionadded:: 1.16.0
"""
return self._accumulate(position_to_max_by, KeyedStream.AccumulateType.MAX_BY)
def add_sink(self, sink_func: SinkFunction) -> 'DataStreamSink':
return self._values().add_sink(sink_func)
def key_by(self, key_selector: Union[Callable, KeySelector],
key_type: TypeInformation = None) -> 'KeyedStream':
return self._origin_stream.key_by(key_selector, key_type)
def process(self, func: KeyedProcessFunction, # type: ignore
output_type: TypeInformation = None) -> 'DataStream':
"""
Applies the given ProcessFunction on the input stream, thereby creating a transformed output
stream.
The function will be called for every element in the input streams and can produce zero or
more output elements.
:param func: The KeyedProcessFunction that is called for each element in the stream.
:param output_type: TypeInformation for the result type of the function.
:return: The transformed DataStream.
"""
if not isinstance(func, KeyedProcessFunction):
raise TypeError("KeyedProcessFunction is required for KeyedStream.")
from pyflink.fn_execution import flink_fn_execution_pb2
j_python_data_stream_function_operator, j_output_type_info = \
_get_one_input_stream_operator(
self,
func,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.KEYED_PROCESS, # type: ignore
output_type)
return DataStream(self._j_data_stream.transform(
"KEYED PROCESS",
j_output_type_info,
j_python_data_stream_function_operator))
def window(self, window_assigner: WindowAssigner) -> 'WindowedStream':
"""
Windows this data stream into a WindowedStream, which evaluates windows over a key
grouped stream. Elements are put into windows by a WindowAssigner. The grouping of
elements is done both by key and by window.
A Trigger can be defined to specify when windows are evaluated. However, WindowAssigners
have a default Trigger that is used if a Trigger is not specified.
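Example (illustrative sketch; ``ds`` is an existing DataStream, and it is assumed that
TumblingProcessingTimeWindows is imported from pyflink.datastream.window and Time from
pyflink.common.time):
::
>>> keyed = ds.key_by(lambda x: x[0])
>>> windowed = keyed.window(TumblingProcessingTimeWindows.of(Time.seconds(5)))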
:param window_assigner: The WindowAssigner that assigns elements to windows.
:return: The trigger windows data stream.
"""
return WindowedStream(self, window_assigner)
def count_window(self, size: int, slide: int = 0):
"""
Windows this KeyedStream into tumbling or sliding count windows.
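Example (illustrative sketch; ``ds`` is an existing DataStream; a tumbling count window of
10 elements per key, typically followed by a window operation such as ``reduce``):
::
>>> ds.key_by(lambda x: x[0]).count_window(10)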
:param size: The size of the windows in number of elements.
:param slide: The slide interval in number of elements.
.. versionadded:: 1.16.0
"""
if slide == 0:
return WindowedStream(self, CountTumblingWindowAssigner(size))
else:
return WindowedStream(self, CountSlidingWindowAssigner(size, slide))
def union(self, *streams) -> 'DataStream':
return self._values().union(*streams)
@overload
def connect(self, ds: 'DataStream') -> 'ConnectedStreams':
pass
@overload
def connect(self, ds: 'BroadcastStream') -> 'BroadcastConnectedStream':
pass
def connect(self, ds: Union['DataStream', 'BroadcastStream']) \
-> Union['ConnectedStreams', 'BroadcastConnectedStream']:
"""
If ds is a :class:`DataStream`, creates a new :class:`ConnectedStreams` by connecting
DataStream outputs of (possible) different types with each other. The DataStreams connected
using this operator can be used with CoFunctions to apply joint transformations.
If ds is a :class:`BroadcastStream`, creates a new :class:`BroadcastConnectedStream` by
connecting the current :class:`DataStream` with a :class:`BroadcastStream`. The latter can
be created using the :meth:`broadcast` method. The resulting stream can be further processed
using the :meth:`BroadcastConnectedStream.process` method.
:param ds: The DataStream or BroadcastStream with which this stream will be connected.
:return: The ConnectedStreams or BroadcastConnectedStream.
.. versionchanged:: 1.16.0
Support connect BroadcastStream
"""
return super().connect(ds)
def shuffle(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def project(self, *field_indexes) -> 'DataStream':
return self._values().project(*field_indexes)
def rescale(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def rebalance(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def forward(self) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def broadcast(self, *args):
"""
Not supported, partitioning for KeyedStream cannot be overridden.
"""
raise Exception('Cannot override partitioning for KeyedStream.')
def partition_custom(self, partitioner: Union[Callable, Partitioner],
key_selector: Union[Callable, KeySelector]) -> 'DataStream':
raise Exception('Cannot override partitioning for KeyedStream.')
def print(self, sink_identifier=None):
        return self._values().print(sink_identifier)
def _values(self) -> 'DataStream':
"""
        Since the Python KeyedStream is represented as Row(key_value, original_data), this method
        is used to extract the original_data.
"""
transformed_stream = self.map(lambda x: x, output_type=self._original_data_type_info)
transformed_stream.name(get_gateway().jvm.org.apache.flink.python.util.PythonConfigUtil
.KEYED_STREAM_VALUE_OPERATOR_NAME)
return DataStream(transformed_stream._j_data_stream)
def set_parallelism(self, parallelism: int):
raise Exception("Set parallelism for KeyedStream is not supported.")
def name(self, name: str):
raise Exception("Set name for KeyedStream is not supported.")
def get_name(self) -> str:
raise Exception("Get name of KeyedStream is not supported.")
def uid(self, uid: str):
raise Exception("Set uid for KeyedStream is not supported.")
def set_uid_hash(self, uid_hash: str):
raise Exception("Set uid hash for KeyedStream is not supported.")
def set_max_parallelism(self, max_parallelism: int):
raise Exception("Set max parallelism for KeyedStream is not supported.")
def force_non_parallel(self):
raise Exception("Set force non-parallel for KeyedStream is not supported.")
def set_buffer_timeout(self, timeout_millis: int):
raise Exception("Set buffer timeout for KeyedStream is not supported.")
def start_new_chain(self) -> 'DataStream':
raise Exception("Start new chain for KeyedStream is not supported.")
def disable_chaining(self) -> 'DataStream':
raise Exception("Disable chaining for KeyedStream is not supported.")
def slot_sharing_group(self, slot_sharing_group: Union[str, SlotSharingGroup]) -> 'DataStream':
raise Exception("Setting slot sharing group for KeyedStream is not supported.")
def cache(self) -> 'CachedDataStream':
raise Exception("Cache for KeyedStream is not supported.")
class CachedDataStream(DataStream):
"""
    CachedDataStream represents a DataStream whose intermediate result will be cached the first
    time it is computed. The cached intermediate result can then be reused by later jobs that use
    the same CachedDataStream, avoiding re-computation of the intermediate result.
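    Example (a minimal sketch; ``ds.cache()`` is assumed to be called in batch execution mode,
    where caching is supported):
    ::
        >>> cached_stream = ds.cache()
        >>> cached_stream.print()       # the first job computes and caches the result
        >>> cached_stream.invalidate()  # release the cached result when no longer needed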
"""
def __init__(self, j_data_stream):
super(CachedDataStream, self).__init__(j_data_stream)
def invalidate(self):
"""
        Invalidate the cached intermediate result of this DataStream to release the physical
        resources. Users are not required to invoke this method to release physical resources
        unless they want to. The cache will be recreated if it is used again after being
        invalidated.
.. versionadded:: 1.16.0
"""
self._j_data_stream.invalidate()
def set_parallelism(self, parallelism: int):
raise Exception("Set parallelism for CachedDataStream is not supported.")
def name(self, name: str):
raise Exception("Set name for CachedDataStream is not supported.")
def get_name(self) -> str:
raise Exception("Get name of CachedDataStream is not supported.")
def uid(self, uid: str):
raise Exception("Set uid for CachedDataStream is not supported.")
def set_uid_hash(self, uid_hash: str):
raise Exception("Set uid hash for CachedDataStream is not supported.")
def set_max_parallelism(self, max_parallelism: int):
raise Exception("Set max parallelism for CachedDataStream is not supported.")
def force_non_parallel(self):
raise Exception("Set force non-parallel for CachedDataStream is not supported.")
def set_buffer_timeout(self, timeout_millis: int):
raise Exception("Set buffer timeout for CachedDataStream is not supported.")
def start_new_chain(self) -> 'DataStream':
raise Exception("Start new chain for CachedDataStream is not supported.")
def disable_chaining(self) -> 'DataStream':
raise Exception("Disable chaining for CachedDataStream is not supported.")
def slot_sharing_group(self, slot_sharing_group: Union[str, SlotSharingGroup]) -> 'DataStream':
raise Exception("Setting slot sharing group for CachedDataStream is not supported.")
class WindowedStream(object):
"""
A WindowedStream represents a data stream where elements are grouped by key, and for each
key, the stream of elements is split into windows based on a WindowAssigner. Window emission
is triggered based on a Trigger.
The windows are conceptually evaluated for each key individually, meaning windows can trigger
at different points for each key.
Note that the WindowedStream is purely an API construct, during runtime the WindowedStream will
be collapsed together with the KeyedStream and the operation over the window into one single
operation.
"""
def __init__(self, keyed_stream: KeyedStream, window_assigner: WindowAssigner):
self._keyed_stream = keyed_stream
self._window_assigner = window_assigner
self._allowed_lateness = 0
self._late_data_output_tag = None # type: Optional[OutputTag]
self._window_trigger = None # type: Trigger
def get_execution_environment(self):
return self._keyed_stream.get_execution_environment()
def get_input_type(self):
return _from_java_type(self._keyed_stream._original_data_type_info.get_java_type_info())
def trigger(self, trigger: Trigger) -> 'WindowedStream':
"""
Sets the Trigger that should be used to trigger window emission.
"""
self._window_trigger = trigger
return self
def allowed_lateness(self, time_ms: int) -> 'WindowedStream':
"""
Sets the time by which elements are allowed to be late. Elements that arrive behind the
watermark by more than the specified time will be dropped. By default, the allowed lateness
is 0.
Setting an allowed lateness is only valid for event-time windows.
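        Example (a minimal sketch allowing elements to be up to 5 seconds late):
        ::
            >>> ds.key_by(lambda x: x[0]) \\
            ...     .window(TumblingEventTimeWindows.of(Time.seconds(5))) \\
            ...     .allowed_lateness(5000)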
"""
self._allowed_lateness = time_ms
return self
def side_output_late_data(self, output_tag: OutputTag) -> 'WindowedStream':
"""
Send late arriving data to the side output identified by the given :class:`OutputTag`. Data
is considered late after the watermark has passed the end of the window plus the allowed
lateness set using :func:`allowed_lateness`.
You can get the stream of late data using :func:`~DataStream.get_side_output` on the
:class:`DataStream` resulting from the windowed operation with the same :class:`OutputTag`.
Example:
::
>>> tag = OutputTag("late-data", Types.TUPLE([Types.INT(), Types.STRING()]))
>>> main_stream = ds.key_by(lambda x: x[1]) \\
... .window(TumblingEventTimeWindows.of(Time.seconds(5))) \\
... .side_output_late_data(tag) \\
            ...     .reduce(lambda a, b: (a[0] + b[0], b[1]))
>>> late_stream = main_stream.get_side_output(tag)
.. versionadded:: 1.16.0
"""
self._late_data_output_tag = output_tag
return self
def reduce(self,
reduce_function: Union[Callable, ReduceFunction],
window_function: Union[WindowFunction, ProcessWindowFunction] = None,
output_type: TypeInformation = None) -> DataStream:
"""
Applies a reduce function to the window. The window function is called for each evaluation
of the window for each key individually. The output of the reduce function is interpreted as
a regular non-windowed stream.
        This window will try to incrementally aggregate data as much as the window policies
permit. For example, tumbling time windows can aggregate the data, meaning that only one
element per key is stored. Sliding time windows will aggregate on the granularity of the
slide interval, so a few elements are stored per key (one per slide interval). Custom
windows may not be able to incrementally aggregate, or may need to store extra values in an
aggregation tree.
Example:
::
>>> ds.key_by(lambda x: x[1]) \\
... .window(TumblingEventTimeWindows.of(Time.seconds(5))) \\
            ...     .reduce(lambda a, b: (a[0] + b[0], b[1]))
:param reduce_function: The reduce function.
:param window_function: The window function.
:param output_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the reduce function to the window.
.. versionadded:: 1.16.0
"""
if window_function is None:
internal_window_function = InternalSingleValueWindowFunction(
PassThroughWindowFunction()) # type: InternalWindowFunction
if output_type is None:
output_type = self.get_input_type()
elif isinstance(window_function, WindowFunction):
internal_window_function = InternalSingleValueWindowFunction(window_function)
elif isinstance(window_function, ProcessWindowFunction):
internal_window_function = InternalSingleValueProcessWindowFunction(window_function)
else:
raise TypeError("window_function should be a WindowFunction or ProcessWindowFunction")
reducing_state_descriptor = ReducingStateDescriptor(WINDOW_STATE_NAME,
reduce_function,
self.get_input_type())
func_desc = type(reduce_function).__name__
if window_function is not None:
func_desc = "%s, %s" % (func_desc, type(window_function).__name__)
return self._get_result_data_stream(internal_window_function,
reducing_state_descriptor,
func_desc,
output_type)
def aggregate(self,
aggregate_function: AggregateFunction,
window_function: Union[WindowFunction, ProcessWindowFunction] = None,
accumulator_type: TypeInformation = None,
output_type: TypeInformation = None) -> DataStream:
"""
Applies the given window function to each window. The window function is called for each
evaluation of the window for each key individually. The output of the window function is
interpreted as a regular non-windowed stream.
Arriving data is incrementally aggregated using the given aggregate function. This means
that the window function typically has only a single value to process when called.
Example:
::
>>> class AverageAggregate(AggregateFunction):
... def create_accumulator(self) -> Tuple[int, int]:
... return 0, 0
...
... def add(self, value: Tuple[str, int], accumulator: Tuple[int, int]) \\
... -> Tuple[int, int]:
... return accumulator[0] + value[1], accumulator[1] + 1
...
... def get_result(self, accumulator: Tuple[int, int]) -> float:
... return accumulator[0] / accumulator[1]
...
... def merge(self, a: Tuple[int, int], b: Tuple[int, int]) -> Tuple[int, int]:
... return a[0] + b[0], a[1] + b[1]
>>> ds.key_by(lambda x: x[1]) \\
... .window(TumblingEventTimeWindows.of(Time.seconds(5))) \\
... .aggregate(AverageAggregate(),
... accumulator_type=Types.TUPLE([Types.LONG(), Types.LONG()]),
... output_type=Types.DOUBLE())
:param aggregate_function: The aggregation function that is used for incremental
aggregation.
:param window_function: The window function.
:param accumulator_type: Type information for the internal accumulator type of the
aggregation function.
:param output_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the window function to the window.
.. versionadded:: 1.16.0
"""
if window_function is None:
internal_window_function = InternalSingleValueWindowFunction(
PassThroughWindowFunction()) # type: InternalWindowFunction
elif isinstance(window_function, WindowFunction):
internal_window_function = InternalSingleValueWindowFunction(window_function)
elif isinstance(window_function, ProcessWindowFunction):
internal_window_function = InternalSingleValueProcessWindowFunction(window_function)
else:
raise TypeError("window_function should be a WindowFunction or ProcessWindowFunction")
if accumulator_type is None:
accumulator_type = Types.PICKLED_BYTE_ARRAY()
elif isinstance(accumulator_type, list):
accumulator_type = RowTypeInfo(accumulator_type)
aggregating_state_descriptor = AggregatingStateDescriptor(WINDOW_STATE_NAME,
aggregate_function,
accumulator_type)
func_desc = type(aggregate_function).__name__
if window_function is not None:
func_desc = "%s, %s" % (func_desc, type(window_function).__name__)
return self._get_result_data_stream(internal_window_function,
aggregating_state_descriptor,
func_desc,
output_type)
def apply(self,
window_function: WindowFunction, output_type: TypeInformation = None) -> DataStream:
"""
Applies the given window function to each window. The window function is called for each
evaluation of the window for each key individually. The output of the window function is
interpreted as a regular non-windowed stream.
Note that this function requires that all data in the windows is buffered until the window
is evaluated, as the function provides no means of incremental aggregation.
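        Example (a minimal sketch; ``CountWindowFunction`` is a hypothetical WindowFunction that
        emits the element count per key and window):
        ::
            >>> class CountWindowFunction(WindowFunction):
            ...     def apply(self, key, window, inputs):
            ...         yield key, len([e for e in inputs])
            >>> ds.key_by(lambda x: x[0]) \\
            ...     .window(TumblingEventTimeWindows.of(Time.seconds(5))) \\
            ...     .apply(CountWindowFunction(), Types.TUPLE([Types.STRING(), Types.INT()]))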
:param window_function: The window function.
:param output_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the window function to the window.
"""
internal_window_function = InternalIterableWindowFunction(
window_function) # type: InternalWindowFunction
list_state_descriptor = ListStateDescriptor(WINDOW_STATE_NAME, self.get_input_type())
func_desc = type(window_function).__name__
return self._get_result_data_stream(internal_window_function,
list_state_descriptor,
func_desc,
output_type)
def process(self,
process_window_function: ProcessWindowFunction,
output_type: TypeInformation = None) -> DataStream:
"""
Applies the given window function to each window. The window function is called for each
evaluation of the window for each key individually. The output of the window function is
interpreted as a regular non-windowed stream.
Note that this function requires that all data in the windows is buffered until the window
is evaluated, as the function provides no means of incremental aggregation.
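        Example (a minimal sketch; ``CountProcessWindowFunction`` is a hypothetical
        ProcessWindowFunction that emits the element count per key and window):
        ::
            >>> class CountProcessWindowFunction(ProcessWindowFunction):
            ...     def process(self, key, context, elements):
            ...         yield key, len([e for e in elements])
            >>> ds.key_by(lambda x: x[0]) \\
            ...     .window(TumblingEventTimeWindows.of(Time.seconds(5))) \\
            ...     .process(CountProcessWindowFunction(),
            ...              Types.TUPLE([Types.STRING(), Types.INT()]))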
:param process_window_function: The window function.
:param output_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the window function to the window.
"""
internal_window_function = InternalIterableProcessWindowFunction(
process_window_function) # type: InternalWindowFunction
list_state_descriptor = ListStateDescriptor(WINDOW_STATE_NAME, self.get_input_type())
func_desc = type(process_window_function).__name__
return self._get_result_data_stream(internal_window_function,
list_state_descriptor,
func_desc,
output_type)
def _get_result_data_stream(self,
internal_window_function: InternalWindowFunction,
window_state_descriptor: StateDescriptor,
func_desc: str,
output_type: TypeInformation):
if self._window_trigger is None:
self._window_trigger = self._window_assigner.get_default_trigger(
self.get_execution_environment())
window_serializer = self._window_assigner.get_window_serializer()
window_operation_descriptor = WindowOperationDescriptor(
self._window_assigner,
self._window_trigger,
self._allowed_lateness,
self._late_data_output_tag,
window_state_descriptor,
window_serializer,
internal_window_function)
from pyflink.fn_execution import flink_fn_execution_pb2
j_python_data_stream_function_operator, j_output_type_info = \
_get_one_input_stream_operator(
self._keyed_stream,
window_operation_descriptor,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.WINDOW, # type: ignore
output_type)
op_name = window_operation_descriptor.generate_op_name()
op_desc = window_operation_descriptor.generate_op_desc("Window", func_desc)
return DataStream(self._keyed_stream._j_data_stream.transform(
op_name,
j_output_type_info,
j_python_data_stream_function_operator)).set_description(op_desc)
class AllWindowedStream(object):
"""
    An AllWindowedStream represents a data stream where the stream of elements is split into windows
based on a WindowAssigner. Window emission is triggered based on a Trigger.
If an Evictor is specified it will be used to evict elements from the window after evaluation
was triggered by the Trigger but before the actual evaluation of the window.
When using an evictor, window performance will degrade significantly, since pre-aggregation of
window results cannot be used.
Note that the AllWindowedStream is purely an API construct, during runtime the AllWindowedStream
will be collapsed together with the operation over the window into one single operation.
"""
def __init__(self, data_stream: DataStream, window_assigner: WindowAssigner):
self._keyed_stream = data_stream.key_by(NullByteKeySelector())
self._window_assigner = window_assigner
self._allowed_lateness = 0
self._late_data_output_tag = None # type: Optional[OutputTag]
self._window_trigger = None # type: Trigger
def get_execution_environment(self):
return self._keyed_stream.get_execution_environment()
def get_input_type(self):
return _from_java_type(self._keyed_stream._original_data_type_info.get_java_type_info())
def trigger(self, trigger: Trigger) -> 'AllWindowedStream':
"""
Sets the Trigger that should be used to trigger window emission.
"""
if isinstance(self._window_assigner, MergingWindowAssigner) \
and (trigger.can_merge() is not True):
raise TypeError("A merging window assigner cannot be used with a trigger that does "
"not support merging.")
self._window_trigger = trigger
return self
def allowed_lateness(self, time_ms: int) -> 'AllWindowedStream':
"""
Sets the time by which elements are allowed to be late. Elements that arrive behind the
watermark by more than the specified time will be dropped. By default, the allowed lateness
is 0.
Setting an allowed lateness is only valid for event-time windows.
"""
self._allowed_lateness = time_ms
return self
def side_output_late_data(self, output_tag: OutputTag) -> 'AllWindowedStream':
"""
Send late arriving data to the side output identified by the given :class:`OutputTag`. Data
is considered late after the watermark has passed the end of the window plus the allowed
lateness set using :func:`allowed_lateness`.
You can get the stream of late data using :func:`~DataStream.get_side_output` on the
:class:`DataStream` resulting from the windowed operation with the same :class:`OutputTag`.
Example:
::
>>> tag = OutputTag("late-data", Types.TUPLE([Types.INT(), Types.STRING()]))
>>> main_stream = ds.window_all(TumblingEventTimeWindows.of(Time.seconds(5))) \\
... .side_output_late_data(tag) \\
... .process(MyProcessAllWindowFunction(),
... Types.TUPLE([Types.LONG(), Types.LONG(), Types.INT()]))
>>> late_stream = main_stream.get_side_output(tag)
"""
self._late_data_output_tag = output_tag
return self
def reduce(self,
reduce_function: Union[Callable, ReduceFunction],
window_function: Union[AllWindowFunction, ProcessAllWindowFunction] = None,
output_type: TypeInformation = None) -> DataStream:
"""
Applies the given window function to each window. The window function is called for each
        evaluation of the window. The output of the window function is
interpreted as a regular non-windowed stream.
Arriving data is incrementally aggregated using the given reducer.
Example:
::
>>> ds.window_all(TumblingEventTimeWindows.of(Time.seconds(5))) \\
            ...     .reduce(lambda a, b: (a[0] + b[0], b[1]))
:param reduce_function: The reduce function.
:param window_function: The window function.
:param output_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the reduce function to the window.
.. versionadded:: 1.16.0
"""
if window_function is None:
internal_window_function = InternalSingleValueAllWindowFunction(
PassThroughAllWindowFunction()) # type: InternalWindowFunction
if output_type is None:
output_type = self.get_input_type()
elif isinstance(window_function, AllWindowFunction):
internal_window_function = InternalSingleValueAllWindowFunction(window_function)
elif isinstance(window_function, ProcessAllWindowFunction):
internal_window_function = InternalSingleValueProcessAllWindowFunction(window_function)
else:
raise TypeError("window_function should be a AllWindowFunction or "
"ProcessAllWindowFunction")
reducing_state_descriptor = ReducingStateDescriptor(WINDOW_STATE_NAME,
reduce_function,
self.get_input_type())
func_desc = type(reduce_function).__name__
if window_function is not None:
func_desc = "%s, %s" % (func_desc, type(window_function).__name__)
return self._get_result_data_stream(internal_window_function,
reducing_state_descriptor,
func_desc,
output_type)
def aggregate(self,
aggregate_function: AggregateFunction,
window_function: Union[AllWindowFunction, ProcessAllWindowFunction] = None,
accumulator_type: TypeInformation = None,
output_type: TypeInformation = None) -> DataStream:
"""
Applies the given window function to each window. The window function is called for each
        evaluation of the window. The output of the window function is
interpreted as a regular non-windowed stream.
Arriving data is incrementally aggregated using the given aggregate function. This means
that the window function typically has only a single value to process when called.
Example:
::
>>> class AverageAggregate(AggregateFunction):
... def create_accumulator(self) -> Tuple[int, int]:
... return 0, 0
...
... def add(self, value: Tuple[str, int], accumulator: Tuple[int, int]) \\
... -> Tuple[int, int]:
... return accumulator[0] + value[1], accumulator[1] + 1
...
... def get_result(self, accumulator: Tuple[int, int]) -> float:
... return accumulator[0] / accumulator[1]
...
... def merge(self, a: Tuple[int, int], b: Tuple[int, int]) -> Tuple[int, int]:
... return a[0] + b[0], a[1] + b[1]
...
>>> ds.window_all(TumblingEventTimeWindows.of(Time.seconds(5))) \\
... .aggregate(AverageAggregate(),
... accumulator_type=Types.TUPLE([Types.LONG(), Types.LONG()]),
... output_type=Types.DOUBLE())
:param aggregate_function: The aggregation function that is used for incremental
aggregation.
:param window_function: The window function.
:param accumulator_type: Type information for the internal accumulator type of the
aggregation function.
:param output_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the window function to the window.
.. versionadded:: 1.16.0
"""
if window_function is None:
internal_window_function = InternalSingleValueAllWindowFunction(
PassThroughAllWindowFunction()) # type: InternalWindowFunction
elif isinstance(window_function, AllWindowFunction):
internal_window_function = InternalSingleValueAllWindowFunction(window_function)
elif isinstance(window_function, ProcessAllWindowFunction):
internal_window_function = InternalSingleValueProcessAllWindowFunction(window_function)
else:
raise TypeError("window_function should be a AllWindowFunction or "
"ProcessAllWindowFunction")
if accumulator_type is None:
accumulator_type = Types.PICKLED_BYTE_ARRAY()
elif isinstance(accumulator_type, list):
accumulator_type = RowTypeInfo(accumulator_type)
aggregating_state_descriptor = AggregatingStateDescriptor(WINDOW_STATE_NAME,
aggregate_function,
accumulator_type)
func_desc = type(aggregate_function).__name__
if window_function is not None:
func_desc = "%s, %s" % (func_desc, type(window_function).__name__)
return self._get_result_data_stream(internal_window_function,
aggregating_state_descriptor,
func_desc,
output_type)
def apply(self,
window_function: AllWindowFunction,
output_type: TypeInformation = None) -> DataStream:
"""
Applies the given window function to each window. The window function is called for each
evaluation of the window. The output of the window function is interpreted as a regular
non-windowed stream.
Note that this function requires that all data in the windows is buffered until the window
is evaluated, as the function provides no means of incremental aggregation.
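        Example (a minimal sketch; ``CountAllWindowFunction`` is a hypothetical AllWindowFunction
        that emits the element count per window):
        ::
            >>> class CountAllWindowFunction(AllWindowFunction):
            ...     def apply(self, window, inputs):
            ...         yield len([e for e in inputs])
            >>> ds.window_all(TumblingEventTimeWindows.of(Time.seconds(5))) \\
            ...     .apply(CountAllWindowFunction(), Types.INT())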
:param window_function: The window function.
:param output_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the window function to the window.
"""
internal_window_function = InternalIterableAllWindowFunction(
window_function) # type: InternalWindowFunction
list_state_descriptor = ListStateDescriptor(WINDOW_STATE_NAME, self.get_input_type())
func_desc = type(window_function).__name__
return self._get_result_data_stream(internal_window_function,
list_state_descriptor,
func_desc,
output_type)
def process(self,
process_window_function: ProcessAllWindowFunction,
output_type: TypeInformation = None) -> DataStream:
"""
Applies the given window function to each window. The window function is called for each
        evaluation of the window. The output of the window function is
interpreted as a regular non-windowed stream.
Note that this function requires that all data in the windows is buffered until the window
is evaluated, as the function provides no means of incremental aggregation.
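        Example (a minimal sketch; ``MyProcessAllWindowFunction`` is a hypothetical
        ProcessAllWindowFunction implementation):
        ::
            >>> ds.window_all(TumblingEventTimeWindows.of(Time.seconds(5))) \\
            ...     .process(MyProcessAllWindowFunction(),
            ...              Types.TUPLE([Types.LONG(), Types.LONG(), Types.INT()]))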
:param process_window_function: The window function.
:param output_type: Type information for the result type of the window function.
:return: The data stream that is the result of applying the window function to the window.
"""
internal_window_function = InternalIterableProcessAllWindowFunction(
process_window_function) # type: InternalWindowFunction
list_state_descriptor = ListStateDescriptor(WINDOW_STATE_NAME, self.get_input_type())
func_desc = type(process_window_function).__name__
return self._get_result_data_stream(internal_window_function,
list_state_descriptor,
func_desc,
output_type)
def _get_result_data_stream(self,
internal_window_function: InternalWindowFunction,
window_state_descriptor: StateDescriptor,
func_desc: str,
output_type: TypeInformation):
if self._window_trigger is None:
self._window_trigger = self._window_assigner.get_default_trigger(
self.get_execution_environment())
window_serializer = self._window_assigner.get_window_serializer()
window_operation_descriptor = WindowOperationDescriptor(
self._window_assigner,
self._window_trigger,
self._allowed_lateness,
self._late_data_output_tag,
window_state_descriptor,
window_serializer,
internal_window_function)
from pyflink.fn_execution import flink_fn_execution_pb2
j_python_data_stream_function_operator, j_output_type_info = \
_get_one_input_stream_operator(
self._keyed_stream,
window_operation_descriptor,
flink_fn_execution_pb2.UserDefinedDataStreamFunction.WINDOW, # type: ignore
output_type)
op_name = window_operation_descriptor.generate_op_name()
op_desc = window_operation_descriptor.generate_op_desc("AllWindow", func_desc)
return DataStream(self._keyed_stream._j_data_stream.transform(
op_name,
j_output_type_info,
j_python_data_stream_function_operator)).set_description(op_desc)
class ConnectedStreams(object):
"""
ConnectedStreams represent two connected streams of (possibly) different data types.
Connected streams are useful for cases where operations on one stream directly
affect the operations on the other stream, usually via shared state between the streams.
An example for the use of connected streams would be to apply rules that change over time
onto another stream. One of the connected streams has the rules, the other stream the
elements to apply the rules to. The operation on the connected stream maintains the
current set of rules in the state. It may receive either a rule update and update the state
or a data element and apply the rules in the state to the element.
The connected stream can be conceptually viewed as a union stream of an Either type, that
holds either the first stream's type or the second stream's type.
"""
def __init__(self, stream1: DataStream, stream2: DataStream):
self.stream1 = stream1
self.stream2 = stream2
def key_by(self, key_selector1: Union[Callable, KeySelector],
key_selector2: Union[Callable, KeySelector],
key_type: TypeInformation = None) -> 'ConnectedStreams':
"""
KeyBy operation for connected data stream. Assigns keys to the elements of
input1 and input2 using keySelector1 and keySelector2 with explicit type information
for the common key type.
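        Example (a minimal sketch; both streams are assumed to carry tuples keyed by their first
        field):
        ::
            >>> keyed_connected = ds1.connect(ds2).key_by(lambda a: a[0], lambda b: b[0])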
:param key_selector1: The `KeySelector` used for grouping the first input.
:param key_selector2: The `KeySelector` used for grouping the second input.
:param key_type: The type information of the common key type
:return: The partitioned `ConnectedStreams`
"""
ds1 = self.stream1
ds2 = self.stream2
if isinstance(self.stream1, KeyedStream):
ds1 = self.stream1._origin_stream
if isinstance(self.stream2, KeyedStream):
ds2 = self.stream2._origin_stream
return ConnectedStreams(
ds1.key_by(key_selector1, key_type),
ds2.key_by(key_selector2, key_type))
def map(self, func: CoMapFunction, output_type: TypeInformation = None) -> 'DataStream':
"""
Applies a CoMap transformation on a `ConnectedStreams` and maps the output to a common
type. The transformation calls a `CoMapFunction.map1` for each element of the first
input and `CoMapFunction.map2` for each element of the second input. Each CoMapFunction
call returns exactly one element.
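        Example (a minimal sketch; ``MyCoMapFunction`` is a hypothetical CoMapFunction
        implementing ``map1`` and ``map2``):
        ::
            >>> ds1.connect(ds2).map(MyCoMapFunction(), output_type=Types.STRING())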
:param func: The CoMapFunction used to jointly transform the two input DataStreams
:param output_type: `TypeInformation` for the result type of the function.
:return: The transformed `DataStream`
"""
if not isinstance(func, CoMapFunction):
raise TypeError("The input function must be a CoMapFunction!")
if self._is_keyed_stream():
class CoMapKeyedCoProcessFunctionAdapter(KeyedCoProcessFunction):
def __init__(self, co_map_func: CoMapFunction):
self._open_func = co_map_func.open
self._close_func = co_map_func.close
self._map1_func = co_map_func.map1
self._map2_func = co_map_func.map2
def open(self, runtime_context: RuntimeContext):
self._open_func(runtime_context)
def close(self):
self._close_func()
def process_element1(self, value, ctx: 'KeyedCoProcessFunction.Context'):
result = self._map1_func(value)
if result is not None:
yield result
def process_element2(self, value, ctx: 'KeyedCoProcessFunction.Context'):
result = self._map2_func(value)
if result is not None:
yield result
return self.process(CoMapKeyedCoProcessFunctionAdapter(func), output_type) \
.name("Co-Map")
else:
class CoMapCoProcessFunctionAdapter(CoProcessFunction):
def __init__(self, co_map_func: CoMapFunction):
self._open_func = co_map_func.open
self._close_func = co_map_func.close
self._map1_func = co_map_func.map1
self._map2_func = co_map_func.map2
def open(self, runtime_context: RuntimeContext):
self._open_func(runtime_context)
def close(self):
self._close_func()
def process_element1(self, value, ctx: 'CoProcessFunction.Context'):
result = self._map1_func(value)
if result is not None:
yield result
def process_element2(self, value, ctx: 'CoProcessFunction.Context'):
result = self._map2_func(value)
if result is not None:
yield result
return self.process(CoMapCoProcessFunctionAdapter(func), output_type) \
.name("Co-Map")
def flat_map(self, func: CoFlatMapFunction, output_type: TypeInformation = None) \
-> 'DataStream':
"""
Applies a CoFlatMap transformation on a `ConnectedStreams` and maps the output to a
common type. The transformation calls a `CoFlatMapFunction.flatMap1` for each element
of the first input and `CoFlatMapFunction.flatMap2` for each element of the second
input. Each CoFlatMapFunction call returns any number of elements including none.
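        Example (a minimal sketch; ``MyCoFlatMapFunction`` is a hypothetical CoFlatMapFunction
        implementing ``flat_map1`` and ``flat_map2``):
        ::
            >>> ds1.connect(ds2).flat_map(MyCoFlatMapFunction(), output_type=Types.STRING())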
:param func: The CoFlatMapFunction used to jointly transform the two input DataStreams
:param output_type: `TypeInformation` for the result type of the function.
:return: The transformed `DataStream`
"""
if not isinstance(func, CoFlatMapFunction):
raise TypeError("The input must be a CoFlatMapFunction!")
if self._is_keyed_stream():
class FlatMapKeyedCoProcessFunctionAdapter(KeyedCoProcessFunction):
def __init__(self, co_flat_map_func: CoFlatMapFunction):
self._open_func = co_flat_map_func.open
self._close_func = co_flat_map_func.close
self._flat_map1_func = co_flat_map_func.flat_map1
self._flat_map2_func = co_flat_map_func.flat_map2
def open(self, runtime_context: RuntimeContext):
self._open_func(runtime_context)
def close(self):
self._close_func()
def process_element1(self, value, ctx: 'KeyedCoProcessFunction.Context'):
result = self._flat_map1_func(value)
if result:
yield from result
def process_element2(self, value, ctx: 'KeyedCoProcessFunction.Context'):
result = self._flat_map2_func(value)
if result:
yield from result
return self.process(FlatMapKeyedCoProcessFunctionAdapter(func), output_type) \
.name("Co-Flat Map")
else:
class FlatMapCoProcessFunctionAdapter(CoProcessFunction):
def __init__(self, co_flat_map_func: CoFlatMapFunction):
self._open_func = co_flat_map_func.open
self._close_func = co_flat_map_func.close
self._flat_map1_func = co_flat_map_func.flat_map1
self._flat_map2_func = co_flat_map_func.flat_map2
def open(self, runtime_context: RuntimeContext):
self._open_func(runtime_context)
def close(self):
self._close_func()
def process_element1(self, value, ctx: 'CoProcessFunction.Context'):
result = self._flat_map1_func(value)
if result:
yield from result
def process_element2(self, value, ctx: 'CoProcessFunction.Context'):
result = self._flat_map2_func(value)
if result:
yield from result
return self.process(FlatMapCoProcessFunctionAdapter(func), output_type) \
.name("Co-Flat Map")
def process(self,
func: Union[CoProcessFunction, KeyedCoProcessFunction],
output_type: TypeInformation = None) -> 'DataStream':
if not isinstance(func, CoProcessFunction) and not isinstance(func, KeyedCoProcessFunction):
raise TypeError("The input must be a CoProcessFunction or KeyedCoProcessFunction!")
from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
if self._is_keyed_stream():
func_type = UserDefinedDataStreamFunction.KEYED_CO_PROCESS # type: ignore
func_name = "Keyed Co-Process"
else:
func_type = UserDefinedDataStreamFunction.CO_PROCESS # type: ignore
func_name = "Co-Process"
j_connected_stream = self.stream1._j_data_stream.connect(self.stream2._j_data_stream)
j_operator, j_output_type = _get_two_input_stream_operator(
self,
func,
func_type,
output_type)
return DataStream(j_connected_stream.transform(func_name, j_output_type, j_operator))
def _is_keyed_stream(self):
return isinstance(self.stream1, KeyedStream) and isinstance(self.stream2, KeyedStream)
class BroadcastStream(object):
"""
    A BroadcastStream is a stream with :class:`state.BroadcastState` (s). It can be created from
    any stream using the :meth:`DataStream.broadcast` method, which implicitly creates the states
    where the user can store elements of the created :class:`BroadcastStream` (see
    :class:`BroadcastConnectedStream`).
Note that no further operation can be applied to these streams. The only available option is
to connect them with a keyed or non-keyed stream, using the :meth:`KeyedStream.connect` and the
    :meth:`DataStream.connect` respectively. Applying these methods will result in a
:class:`BroadcastConnectedStream` for further processing.
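    Example (a minimal sketch; ``rule_stream`` is assumed to be a DataStream of rules and
    ``rule_state_descriptor`` the MapStateDescriptor for the broadcast state):
    ::
        >>> rule_state_descriptor = MapStateDescriptor("rules", Types.STRING(), Types.STRING())
        >>> broadcast_rules = rule_stream.broadcast(rule_state_descriptor)
        >>> connected = ds.connect(broadcast_rules)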
.. versionadded:: 1.16.0
"""
def __init__(
self,
input_stream: Union['DataStream', 'KeyedStream'],
broadcast_state_descriptors: List[MapStateDescriptor],
):
self.input_stream = input_stream
self.broadcast_state_descriptors = broadcast_state_descriptors
class BroadcastConnectedStream(object):
"""
A BroadcastConnectedStream represents the result of connecting a keyed or non-keyed stream, with
a :class:`BroadcastStream` with :class:`~state.BroadcastState` (s). As in the case of
:class:`ConnectedStreams` these streams are useful for cases where operations on one stream
directly affect the operations on the other stream, usually via shared state between the
streams.
An example for the use of such connected streams would be to apply rules that change over time
onto another, possibly keyed stream. The stream with the broadcast state has the rules, and will
store them in the broadcast state, while the other stream will contain the elements to apply the
rules to. By broadcasting the rules, these will be available in all parallel instances, and can
be applied to all partitions of the other stream.
.. versionadded:: 1.16.0
"""
def __init__(
self,
non_broadcast_stream: Union['DataStream', 'KeyedStream'],
broadcast_stream: 'BroadcastStream',
broadcast_state_descriptors: List[MapStateDescriptor],
):
self.non_broadcast_stream = non_broadcast_stream
self.broadcast_stream = broadcast_stream
self.broadcast_state_descriptors = broadcast_state_descriptors
@overload
def process(
self,
func: BroadcastProcessFunction,
output_type: TypeInformation = None,
) -> 'DataStream':
pass
@overload
def process(
self,
func: KeyedBroadcastProcessFunction,
output_type: TypeInformation = None
) -> 'DataStream':
pass
def process(
self,
func: Union[BroadcastProcessFunction, KeyedBroadcastProcessFunction],
output_type: TypeInformation = None,
) -> 'DataStream':
"""
Assumes as inputs a :class:`BroadcastStream` and a :class:`DataStream` or
:class:`KeyedStream` and applies the given :class:`BroadcastProcessFunction` or
:class:`KeyedBroadcastProcessFunction` on them, thereby creating a transformed output
stream.
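        Example (a minimal sketch; ``broadcast_rules`` is a :class:`BroadcastStream` and
        ``MyKeyedBroadcastProcessFunction`` a hypothetical implementation):
        ::
            >>> ds.key_by(lambda x: x[0]) \\
            ...     .connect(broadcast_rules) \\
            ...     .process(MyKeyedBroadcastProcessFunction(), output_type=Types.STRING())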
:param func: The :class:`BroadcastProcessFunction` that is called for each element in the
non-broadcasted :class:`DataStream`, or the :class:`KeyedBroadcastProcessFunction` that
is called for each element in the non-broadcasted :class:`KeyedStream`.
:param output_type: The type of the output elements, should be
:class:`common.TypeInformation` or list (implicit :class:`RowTypeInfo`) or None (
implicit :meth:`Types.PICKLED_BYTE_ARRAY`).
:return: The transformed :class:`DataStream`.
"""
if isinstance(func, BroadcastProcessFunction) and self._is_keyed_stream():
raise TypeError("BroadcastProcessFunction should be applied to non-keyed DataStream")
if isinstance(func, KeyedBroadcastProcessFunction) and (not self._is_keyed_stream()):
raise TypeError("KeyedBroadcastProcessFunction should be applied to keyed DataStream")
j_input_transformation1 = self.non_broadcast_stream._j_data_stream.getTransformation()
j_input_transformation2 = (
self.broadcast_stream.input_stream._j_data_stream.getTransformation()
)
if output_type is None:
output_type_info = Types.PICKLED_BYTE_ARRAY() # type: TypeInformation
elif isinstance(output_type, list):
output_type_info = RowTypeInfo(output_type)
elif isinstance(output_type, TypeInformation):
output_type_info = output_type
else:
raise TypeError("output_type must be None, list or TypeInformation")
j_output_type = output_type_info.get_java_type_info()
from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
jvm = get_gateway().jvm
JPythonConfigUtil = jvm.org.apache.flink.python.util.PythonConfigUtil
if self._is_keyed_stream():
func_type = UserDefinedDataStreamFunction.KEYED_CO_BROADCAST_PROCESS # type: ignore
func_name = "Keyed-Co-Process-Broadcast"
else:
func_type = UserDefinedDataStreamFunction.CO_BROADCAST_PROCESS # type: ignore
func_name = "Co-Process-Broadcast"
j_state_names = to_jarray(
jvm.String, [i.get_name() for i in self.broadcast_state_descriptors]
)
j_state_descriptors = JPythonConfigUtil.convertStateNamesToStateDescriptors(j_state_names)
j_conf = get_j_env_configuration(
self.broadcast_stream.input_stream._j_data_stream.getExecutionEnvironment())
j_data_stream_python_function_info = _create_j_data_stream_python_function_info(
func, func_type
)
j_env = (
self.non_broadcast_stream.get_execution_environment()._j_stream_execution_environment
)
if self._is_keyed_stream():
JTransformation = jvm.org.apache.flink.streaming.api.transformations.python \
.PythonKeyedBroadcastStateTransformation
j_transformation = JTransformation(
func_name,
j_conf,
j_data_stream_python_function_info,
j_input_transformation1,
j_input_transformation2,
j_state_descriptors,
self.non_broadcast_stream._j_data_stream.getKeyType(),
self.non_broadcast_stream._j_data_stream.getKeySelector(),
j_output_type,
j_env.getParallelism(),
)
else:
JTransformation = jvm.org.apache.flink.streaming.api.transformations.python \
.PythonBroadcastStateTransformation
j_transformation = JTransformation(
func_name,
j_conf,
j_data_stream_python_function_info,
j_input_transformation1,
j_input_transformation2,
j_state_descriptors,
j_output_type,
j_env.getParallelism(),
)
j_env.addOperator(j_transformation)
j_data_stream = JPythonConfigUtil.createSingleOutputStreamOperator(j_env, j_transformation)
return DataStream(j_data_stream)
def _is_keyed_stream(self):
return isinstance(self.non_broadcast_stream, KeyedStream)
def _get_one_input_stream_operator(data_stream: DataStream,
func: Union[Function,
FunctionWrapper,
WindowOperationDescriptor],
func_type: int,
output_type: Union[TypeInformation, List] = None):
"""
Create a Java one input stream operator.
:param func: a function object that implements the Function interface.
:param func_type: function type, supports MAP, FLAT_MAP, etc.
:param output_type: the data type of the function output data.
:return: A Java operator which is responsible for execution user defined python function.
"""
gateway = get_gateway()
j_input_types = data_stream._j_data_stream.getTransformation().getOutputType()
if output_type is None:
output_type_info = Types.PICKLED_BYTE_ARRAY() # type: TypeInformation
elif isinstance(output_type, list):
output_type_info = RowTypeInfo(output_type)
else:
output_type_info = output_type
j_data_stream_python_function_info = _create_j_data_stream_python_function_info(func, func_type)
j_output_type_info = output_type_info.get_java_type_info()
j_conf = get_j_env_configuration(data_stream._j_data_stream.getExecutionEnvironment())
python_execution_mode = (
j_conf.getString(gateway.jvm.org.apache.flink.python.PythonOptions.PYTHON_EXECUTION_MODE))
from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
if func_type == UserDefinedDataStreamFunction.PROCESS: # type: ignore
if python_execution_mode == 'thread':
JDataStreamPythonFunctionOperator = gateway.jvm.EmbeddedPythonProcessOperator
else:
JDataStreamPythonFunctionOperator = gateway.jvm.ExternalPythonProcessOperator
elif func_type == UserDefinedDataStreamFunction.KEYED_PROCESS: # type: ignore
if python_execution_mode == 'thread':
JDataStreamPythonFunctionOperator = gateway.jvm.EmbeddedPythonKeyedProcessOperator
else:
JDataStreamPythonFunctionOperator = gateway.jvm.ExternalPythonKeyedProcessOperator
elif func_type == UserDefinedDataStreamFunction.WINDOW: # type: ignore
window_serializer = typing.cast(WindowOperationDescriptor, func).window_serializer
if isinstance(window_serializer, TimeWindowSerializer):
j_namespace_serializer = \
gateway.jvm.org.apache.flink.table.runtime.operators.window.TimeWindow.Serializer()
elif isinstance(window_serializer, CountWindowSerializer):
j_namespace_serializer = \
gateway.jvm.org.apache.flink.table.runtime.operators.window.CountWindow.Serializer()
elif isinstance(window_serializer, GlobalWindowSerializer):
j_namespace_serializer = \
gateway.jvm.org.apache.flink.streaming.api.windowing.windows.GlobalWindow \
.Serializer()
else:
j_namespace_serializer = \
gateway.jvm.org.apache.flink.streaming.api.utils.ByteArrayWrapperSerializer()
if python_execution_mode == 'thread':
JDataStreamPythonWindowFunctionOperator = gateway.jvm.EmbeddedPythonWindowOperator
else:
JDataStreamPythonWindowFunctionOperator = gateway.jvm.ExternalPythonKeyedProcessOperator
j_python_function_operator = JDataStreamPythonWindowFunctionOperator(
j_conf,
j_data_stream_python_function_info,
j_input_types,
j_output_type_info,
j_namespace_serializer)
return j_python_function_operator, j_output_type_info
else:
raise TypeError("Unsupported function type: %s" % func_type)
j_python_function_operator = JDataStreamPythonFunctionOperator(
j_conf,
j_data_stream_python_function_info,
j_input_types,
j_output_type_info)
return j_python_function_operator, j_output_type_info
def _get_two_input_stream_operator(connected_streams: ConnectedStreams,
func: Union[Function, FunctionWrapper],
func_type: int,
type_info: TypeInformation):
"""
Create a Java two input stream operator.
:param func: a function object that implements the Function interface.
:param func_type: function type, supports MAP, FLAT_MAP, etc.
:param type_info: the data type of the function output data.
:return: A Java operator which is responsible for execution user defined python function.
"""
gateway = get_gateway()
j_input_types1 = connected_streams.stream1._j_data_stream.getTransformation().getOutputType()
j_input_types2 = connected_streams.stream2._j_data_stream.getTransformation().getOutputType()
if type_info is None:
output_type_info = Types.PICKLED_BYTE_ARRAY() # type: TypeInformation
elif isinstance(type_info, list):
output_type_info = RowTypeInfo(type_info)
else:
output_type_info = type_info
j_data_stream_python_function_info = _create_j_data_stream_python_function_info(func, func_type)
j_output_type_info = output_type_info.get_java_type_info()
j_conf = get_j_env_configuration(
connected_streams.stream1._j_data_stream.getExecutionEnvironment())
python_execution_mode = (
j_conf.getString(gateway.jvm.org.apache.flink.python.PythonOptions.PYTHON_EXECUTION_MODE))
from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
if func_type == UserDefinedDataStreamFunction.CO_PROCESS: # type: ignore
if python_execution_mode == 'thread':
JTwoInputPythonFunctionOperator = gateway.jvm.EmbeddedPythonCoProcessOperator
else:
JTwoInputPythonFunctionOperator = gateway.jvm.ExternalPythonCoProcessOperator
elif func_type == UserDefinedDataStreamFunction.KEYED_CO_PROCESS: # type: ignore
if python_execution_mode == 'thread':
JTwoInputPythonFunctionOperator = gateway.jvm.EmbeddedPythonKeyedCoProcessOperator
else:
JTwoInputPythonFunctionOperator = gateway.jvm.ExternalPythonKeyedCoProcessOperator
else:
raise TypeError("Unsupported function type: %s" % func_type)
j_python_data_stream_function_operator = JTwoInputPythonFunctionOperator(
j_conf,
j_data_stream_python_function_info,
j_input_types1,
j_input_types2,
j_output_type_info)
return j_python_data_stream_function_operator, j_output_type_info
def _create_j_data_stream_python_function_info(
func: Union[Function, FunctionWrapper, WindowOperationDescriptor], func_type: int
) -> bytes:
gateway = get_gateway()
import cloudpickle
serialized_func = cloudpickle.dumps(func)
j_data_stream_python_function = gateway.jvm.DataStreamPythonFunction(
bytearray(serialized_func), _get_python_env()
)
return gateway.jvm.DataStreamPythonFunctionInfo(j_data_stream_python_function, func_type)
class CloseableIterator(object):
"""
Representing an Iterator that is also auto closeable.
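    A minimal usage sketch (assuming ``ds.execute_and_collect()`` returns a CloseableIterator):
    ::
        >>> with ds.execute_and_collect() as results:
        ...     for result in results:
        ...         print(result)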
"""
def __init__(self, j_closeable_iterator, type_info: TypeInformation = None):
self._j_closeable_iterator = j_closeable_iterator
self._type_info = type_info
def __iter__(self):
return self
def __next__(self):
return self.next()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def next(self):
if not self._j_closeable_iterator.hasNext():
raise StopIteration('No more data.')
return convert_to_python_obj(self._j_closeable_iterator.next(), self._type_info)
def close(self):
self._j_closeable_iterator.close()
| 137,180 | 45.739693 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/timerservice.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
class TimerService(ABC):
"""
Interface for working with time and timers.
"""
@abstractmethod
def current_processing_time(self):
"""
Returns the current processing time.
"""
pass
@abstractmethod
def current_watermark(self):
"""
Returns the current event-time watermark.
"""
pass
@abstractmethod
def register_processing_time_timer(self, timestamp: int):
"""
Registers a timer to be fired when processing time passes the given time.
Timers can internally be scoped to keys and/or windows. When you set a timer in a keyed
        context, such as in an operation on KeyedStream, then that context will also be active
        when you receive the timer notification.
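        Example (a minimal sketch inside a hypothetical KeyedProcessFunction, registering a timer
        one second from now):
        ::
            >>> def process_element(self, value, ctx: 'KeyedProcessFunction.Context'):
            ...     current = ctx.timer_service().current_processing_time()
            ...     ctx.timer_service().register_processing_time_timer(current + 1000)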
:param timestamp: The processing time of the timer to be registered.
"""
pass
@abstractmethod
def register_event_time_timer(self, timestamp: int):
"""
        Registers a timer to be fired when the event time watermark passes the given time.
Timers can internally be scoped to keys and/or windows. When you set a timer in a keyed
        context, such as in an operation on KeyedStream, then that context will also be active
        when you receive the timer notification.
:param timestamp: The event time of the timer to be registered.
"""
pass
def delete_processing_time_timer(self, timestamp: int):
"""
        Deletes the processing-time timer with the given trigger time. This method only has an
        effect if such a timer was previously registered and did not already expire.
Timers can internally be scoped to keys and/or windows. When you delete a timer, it is
removed from the current keyed context.
:param timestamp: The given trigger time of timer to be deleted.
"""
pass
def delete_event_time_timer(self, timestamp: int):
"""
        Deletes the event-time timer with the given trigger time. This method only has an effect
        if such a timer was previously registered and did not already expire.
Timers can internally be scoped to keys and/or windows. When you delete a timer, it is
removed from the current keyed context.
:param timestamp: The given trigger time of timer to be deleted.
"""
pass
| 3,384 | 37.033708 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/checkpoint_storage.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABCMeta
from py4j.java_gateway import get_java_class
from typing import Optional
from pyflink.java_gateway import get_gateway
__all__ = [
'CheckpointStorage',
'JobManagerCheckpointStorage',
'FileSystemCheckpointStorage',
'CustomCheckpointStorage']
def _from_j_checkpoint_storage(j_checkpoint_storage):
if j_checkpoint_storage is None:
return None
gateway = get_gateway()
JCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.CheckpointStorage
JJobManagerCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage \
.JobManagerCheckpointStorage
JFileSystemCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage \
.FileSystemCheckpointStorage
j_clz = j_checkpoint_storage.getClass()
if not get_java_class(JCheckpointStorage).isAssignableFrom(j_clz):
raise TypeError("%s is not an instance of CheckpointStorage." % j_checkpoint_storage)
if get_java_class(JJobManagerCheckpointStorage).isAssignableFrom(j_clz):
return JobManagerCheckpointStorage(j_jobmanager_checkpoint_storage=j_checkpoint_storage)
elif get_java_class(JFileSystemCheckpointStorage).isAssignableFrom(j_clz):
return FileSystemCheckpointStorage(j_filesystem_checkpoint_storage=j_checkpoint_storage)
else:
return CustomCheckpointStorage(j_checkpoint_storage)
class CheckpointStorage(object, metaclass=ABCMeta):
"""
    Checkpoint storage defines how :class:`StateBackend` implementations store their state for
    fault tolerance in streaming applications. Various implementations store their checkpoints in
    different fashions
and have different requirements and availability guarantees.
For example, :class:`JobManagerCheckpointStorage` stores checkpoints in the memory of the
`JobManager`. It is lightweight and without additional dependencies but is not scalable
    and only supports small state sizes. This checkpoint storage policy is convenient for local
testing and development.
    :class:`FileSystemCheckpointStorage` stores checkpoints in a filesystem. For systems like HDFS,
    NFS drives, S3, and GCS, this storage policy supports large state sizes, in the magnitude of
    many terabytes, while providing a highly available foundation for streaming applications. This
checkpoint storage policy is recommended for most production deployments.
**Raw Bytes Storage**
The `CheckpointStorage` creates services for raw bytes storage.
The raw bytes storage (through the CheckpointStreamFactory) is the fundamental service that
simply stores bytes in a fault tolerant fashion. This service is used by the JobManager to
store checkpoint and recovery metadata and is typically also used by the keyed- and operator-
state backends to store checkpoint state.
**Serializability**
    Implementations need to be serializable (`java.io.Serializable`), because they are distributed
across parallel processes (for distributed execution) together with the streaming application
code.
Because of that `CheckpointStorage` implementations are meant to be like _factories_ that create
the proper state stores that provide access to the persistent layer. That way, the storage
    policy can be very lightweight (containing only configuration), which makes it easier to
    serialize.
**Thread Safety**
Checkpoint storage implementations have to be thread-safe. Multiple threads may be creating
streams concurrently.
"""
def __init__(self, j_checkpoint_storage):
self._j_checkpoint_storage = j_checkpoint_storage
class JobManagerCheckpointStorage(CheckpointStorage):
"""
The `CheckpointStorage` checkpoints state directly to the JobManager's memory (hence the
name), but savepoints will be persisted to a file system.
This checkpoint storage is primarily for experimentation, quick local setups, or for streaming
applications that have very small state: Because it requires checkpoints to go through the
JobManager's memory, larger state will occupy larger portions of the JobManager's main memory,
reducing operational stability. For any other setup, the `FileSystemCheckpointStorage`
    should be used. The `FileSystemCheckpointStorage` checkpoints state directly to files
rather than to the JobManager's memory, thus supporting larger state sizes and more highly
available recovery.
**State Size Considerations**
State checkpointing with this checkpoint storage is subject to the following conditions:
- Each individual state must not exceed the configured maximum state size
      (see :func:`get_max_state_size`).
- All state from one task (i.e., the sum of all operator states and keyed states from all
chained operators of the task) must not exceed what the RPC system supports, which is
      by default < 10 MB. That limit can be configured higher, but that is typically not advised.
- The sum of all states in the application times all retained checkpoints must comfortably
fit into the JobManager's JVM heap space.
**Persistence Guarantees**
For the use cases where the state sizes can be handled by this storage, it does
    guarantee persistence for savepoints, externalized checkpoints (if configured), and checkpoints
(when high-availability is configured).
**Configuration**
As for all checkpoint storage, this type can either be configured within the application (by
creating the storage with the respective constructor parameters and setting it on the execution
environment) or by specifying it in the Flink configuration.
If the storage was specified in the application, it may pick up additional configuration
    parameters from the Flink configuration. For example, if the backend is configured in the
application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster. That behavior is
implemented via the :func:`configure` method.
"""
# The default maximal size that the snapshotted memory state may have (5 MiBytes).
DEFAULT_MAX_STATE_SIZE = 5 * 1024 * 1024
def __init__(self,
checkpoint_path=None,
max_state_size=None,
j_jobmanager_checkpoint_storage=None):
"""
Creates a new JobManagerCheckpointStorage, setting optionally the paths to persist
checkpoint metadata to, as well as configuring state thresholds.
WARNING: Increasing the size of this value beyond the default value
(:data:`DEFAULT_MAX_STATE_SIZE`) should be done with care.
        The checkpointed state needs to be sent to the JobManager via RPC messages of limited
        size, and the JobManager needs to be able to hold all aggregated state in its memory.
Example:
::
>>> checkpoint_storage = JobManagerCheckpointStorage()
:param checkpoint_path: The path to write checkpoint metadata to. If none, the value from
the runtime configuration will be used.
:param max_state_size: The maximal size of the serialized state. If none, the
:data:`DEFAULT_MAX_STATE_SIZE` will be used.
:param j_jobmanager_checkpoint_storage: For internal use, please keep none.
"""
if j_jobmanager_checkpoint_storage is None:
gateway = get_gateway()
JJobManagerCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage\
.JobManagerCheckpointStorage
JPath = gateway.jvm.org.apache.flink.core.fs.Path
if checkpoint_path is not None:
checkpoint_path = JPath(checkpoint_path)
if max_state_size is None:
max_state_size = JJobManagerCheckpointStorage.DEFAULT_MAX_STATE_SIZE
j_jobmanager_checkpoint_storage = JJobManagerCheckpointStorage(checkpoint_path,
max_state_size)
super(JobManagerCheckpointStorage, self).__init__(j_jobmanager_checkpoint_storage)
def get_checkpoint_path(self) -> Optional[str]:
"""
Gets the base directory where all the checkpoints are stored.
The job-specific checkpoint directory is created inside this directory.
:return: The base directory for checkpoints.
"""
j_path = self._j_checkpoint_storage.getCheckpointPath()
if j_path is None:
return None
else:
return j_path.toString()
def get_max_state_size(self) -> int:
"""
Gets the maximum size that an individual state can have, as configured in the
constructor. By default :data:`DEFAULT_MAX_STATE_SIZE` will be used.
"""
return self._j_checkpoint_storage.getMaxStateSize()
def get_savepoint_path(self) -> Optional[str]:
"""
Gets the base directory where all the savepoints are stored.
The job-specific savepoint directory is created inside this directory.
:return: The base directory for savepoints.
"""
j_path = self._j_checkpoint_storage.getSavepointPath()
if j_path is None:
return None
else:
return j_path.toString()
def __str__(self):
return self._j_checkpoint_storage.toString()
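# Hedged usage sketch (illustrative, not part of the original module): constructing a
# JobManagerCheckpointStorage with defaults and reading back its configuration through the
# getters defined above. The helper name `_example_jobmanager_checkpoint_storage` is hypothetical.
def _example_jobmanager_checkpoint_storage():
    storage = JobManagerCheckpointStorage()
    # With no explicit path, checkpoint metadata location falls back to the runtime configuration.
    checkpoint_path = storage.get_checkpoint_path()
    # Defaults to DEFAULT_MAX_STATE_SIZE (5 MiB) unless overridden in the constructor.
    max_state_size = storage.get_max_state_size()
    return checkpoint_path, max_state_size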
class FileSystemCheckpointStorage(CheckpointStorage):
"""
`FileSystemCheckpointStorage` checkpoints state as files to a filesystem.
Each checkpoint will store all its files in a subdirectory that includes the
    checkpoint number, such as `hdfs://namenode:port/flink-checkpoints/chk-17/`.
**State Size Considerations**
This checkpoint storage stores small state chunks directly with the metadata, to avoid creating
many small files. The threshold for that is configurable. When increasing this threshold, the
size of the checkpoint metadata increases. The checkpoint metadata of all retained completed
checkpoints needs to fit into the JobManager's heap memory. This is typically not a problem,
    unless the threshold `get_min_file_size_threshold` is increased significantly.
**Persistence Guarantees**
Checkpoints from this checkpoint storage are as persistent and available as the filesystem
that it is written to. If the file system is a persistent distributed file system, this
checkpoint storage supports highly available setups. The backend additionally supports
savepoints and externalized checkpoints.
**Configuration**
As for all checkpoint storage policies, this backend can either be configured within the
application (by creating the storage with the respective constructor parameters and setting
it on the execution environment) or by specifying it in the Flink configuration.
If the checkpoint storage was specified in the application, it may pick up additional
configuration parameters from the Flink configuration. For example, if the storage is configured
in the application without a default savepoint directory, it will pick up a default savepoint
directory specified in the Flink configuration of the running job/cluster.
"""
# Maximum size of state that is stored with the metadata, rather than in files (1 MiByte).
MAX_FILE_STATE_THRESHOLD = 1024 * 1024
def __init__(self,
checkpoint_path=None,
file_state_size_threshold=None,
write_buffer_size=-1,
j_filesystem_checkpoint_storage=None):
"""
Creates a new FileSystemCheckpointStorage, setting the paths for the checkpoint data
in a file system.
All file systems for the file system scheme in the URI (e.g., `file://`, `hdfs://`, or
`s3://`) must be accessible via `FileSystem#get`.
        For a job targeting HDFS, this means that the URI must either specify the authority (host
        and port), or the Hadoop configuration that describes that information must be in the
classpath.
Example:
::
>>> checkpoint_storage = FileSystemCheckpointStorage("hdfs://checkpoints")
:param checkpoint_path: The path to write checkpoint metadata to. If none, the value from
the runtime configuration will be used.
:param file_state_size_threshold: State below this size will be stored as part of the
metadata, rather than in files. If -1, the value configured
in the runtime configuration will be used, or the default
value (1KB) if nothing is configured.
:param write_buffer_size: Write buffer size used to serialize state. If -1, the value
configured in the runtime configuration will be used, or the
default value (4KB) if nothing is configured.
:param j_filesystem_checkpoint_storage: For internal use, please keep none.
"""
if j_filesystem_checkpoint_storage is None:
gateway = get_gateway()
JFileSystemCheckpointStorage = gateway.jvm.org.apache.flink.runtime.state.storage\
.FileSystemCheckpointStorage
JPath = gateway.jvm.org.apache.flink.core.fs.Path
if checkpoint_path is None:
raise ValueError("checkpoint_path must not be None")
else:
checkpoint_path = JPath(checkpoint_path)
if file_state_size_threshold is None:
file_state_size_threshold = -1
j_filesystem_checkpoint_storage = JFileSystemCheckpointStorage(
checkpoint_path,
file_state_size_threshold,
write_buffer_size)
super(FileSystemCheckpointStorage, self).__init__(j_filesystem_checkpoint_storage)
def get_checkpoint_path(self) -> str:
"""
Gets the base directory where all the checkpoints are stored.
The job-specific checkpoint directory is created inside this directory.
:return: The base directory for checkpoints.
"""
return self._j_checkpoint_storage.getCheckpointPath().toString()
def get_savepoint_path(self) -> Optional[str]:
"""
Gets the base directory where all the savepoints are stored.
The job-specific savepoint directory is created inside this directory.
:return: The base directory for savepoints.
"""
j_path = self._j_checkpoint_storage.getSavepointPath()
if j_path is None:
return None
else:
return j_path.toString()
def get_min_file_size_threshold(self) -> int:
"""
Gets the threshold below which state is stored as part of the metadata, rather than in
        files. This threshold ensures that the backend does not create a large number of small
        files, where potentially the file pointers are larger than the state itself.
"""
return self._j_checkpoint_storage.getMinFileSizeThreshold()
def get_write_buffer_size(self) -> int:
"""
Gets the write buffer size for created checkpoint streams.
"""
return self._j_checkpoint_storage.getWriteBufferSize()
def __str__(self):
return self._j_checkpoint_storage.toString()
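# Hedged usage sketch (illustrative, not part of the original module): a FileSystemCheckpointStorage
# pointed at a local directory; the path below is an assumption chosen only for the example.
def _example_filesystem_checkpoint_storage():
    storage = FileSystemCheckpointStorage("file:///tmp/flink-checkpoints")
    # State smaller than this threshold is inlined into the checkpoint metadata instead of files.
    threshold = storage.get_min_file_size_threshold()
    return storage.get_checkpoint_path(), threshold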
class CustomCheckpointStorage(CheckpointStorage):
"""
A wrapper of customized java checkpoint storage.
"""
def __init__(self, j_custom_checkpoint_storage):
super(CustomCheckpointStorage, self).__init__(j_custom_checkpoint_storage)
| 16,747 | 45.010989 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/time_domain.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
class TimeDomain(Enum):
"""
TimeDomain specifies whether a firing timer is based on event time or processing time.
EVENT_TIME: Time is based on timestamp of events.
PROCESSING_TIME: Time is based on the current processing-time of a machine where processing
happens.
"""
EVENT_TIME = 0
PROCESSING_TIME = 1
| 1,321 | 40.3125 | 95 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/slot_sharing_group.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
__all__ = ['MemorySize', 'SlotSharingGroup']
from typing import Optional
from pyflink.java_gateway import get_gateway
class MemorySize(object):
"""
MemorySize is a representation of a number of bytes, viewable in different units.
"""
def __init__(self, j_memory_size=None, bytes_size: int = None):
self._j_memory_size = get_gateway().jvm \
.org.apache.flink.configuration.MemorySize(bytes_size) \
if j_memory_size is None else j_memory_size
@staticmethod
def of_mebi_bytes(mebi_bytes: int) -> 'MemorySize':
return MemorySize(
get_gateway().jvm.org.apache.flink.configuration.MemorySize.ofMebiBytes(mebi_bytes))
def get_bytes(self) -> int:
"""
Gets the memory size in bytes.
:return: The memory size in bytes.
"""
return self._j_memory_size.getBytes()
def get_kibi_bytes(self) -> int:
"""
Gets the memory size in Kibibytes (= 1024 bytes).
:return: The memory size in Kibibytes.
"""
return self._j_memory_size.getKibiBytes()
def get_mebi_bytes(self) -> int:
"""
Gets the memory size in Mebibytes (= 1024 Kibibytes).
:return: The memory size in Mebibytes.
"""
return self._j_memory_size.getMebiBytes()
def get_gibi_bytes(self) -> int:
"""
Gets the memory size in Gibibytes (= 1024 Mebibytes).
:return: The memory size in Gibibytes.
"""
return self._j_memory_size.getGibiBytes()
def get_tebi_bytes(self) -> int:
"""
Gets the memory size in Tebibytes (= 1024 Gibibytes).
:return: The memory size in Tebibytes.
"""
return self._j_memory_size.getTebiBytes()
def get_java_memory_size(self):
"""
Gets the Java MemorySize object.
:return: The Java MemorySize object.
"""
return self._j_memory_size
def __eq__(self, other):
return isinstance(other, self.__class__) and self._j_memory_size == other._j_memory_size
def __hash__(self):
return self._j_memory_size.hashCode()
def __lt__(self, other: 'MemorySize'):
if not isinstance(other, MemorySize):
raise Exception("Does not support comparison with non-MemorySize %s" % other)
return self._j_memory_size.compareTo(other._j_memory_size) == -1
def __le__(self, other: 'MemorySize'):
        # "less than or equal" holds if either comparison holds; using `and` here would always be False.
        return self.__eq__(other) or self.__lt__(other)
def __str__(self):
return self._j_memory_size.toString()
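# Hedged usage sketch (illustrative, not part of the original module): the same amount of memory
# viewed through the different unit accessors defined above.
def _example_memory_size():
    size = MemorySize.of_mebi_bytes(256)
    assert size.get_mebi_bytes() == 256
    assert size.get_kibi_bytes() == 256 * 1024
    # get_bytes() returns the raw byte count backing all other views.
    return size.get_bytes()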
class SlotSharingGroup(object):
"""
Describe the name and the different resource components of a slot sharing group.
"""
def __init__(self, j_slot_sharing_group):
self._j_slot_sharing_group = j_slot_sharing_group
def get_name(self) -> str:
"""
Gets the name of this SlotSharingGroup.
:return: The name of the SlotSharingGroup.
"""
return self._j_slot_sharing_group.getName()
def get_managed_memory(self) -> Optional[MemorySize]:
"""
Gets the task managed memory for this SlotSharingGroup.
:return: The task managed memory of the SlotSharingGroup.
"""
managed_memory = self._j_slot_sharing_group.getManagedMemory()
return MemorySize(managed_memory.get()) if managed_memory.isPresent() else None
def get_task_heap_memory(self) -> Optional[MemorySize]:
"""
Gets the task heap memory for this SlotSharingGroup.
:return: The task heap memory of the SlotSharingGroup.
"""
task_heap_memory = self._j_slot_sharing_group.getTaskHeapMemory()
return MemorySize(task_heap_memory.get()) if task_heap_memory.isPresent() else None
def get_task_off_heap_memory(self) -> Optional[MemorySize]:
"""
Gets the task off-heap memory for this SlotSharingGroup.
:return: The task off-heap memory of the SlotSharingGroup.
"""
task_off_heap_memory = self._j_slot_sharing_group.getTaskOffHeapMemory()
return MemorySize(task_off_heap_memory.get()) if task_off_heap_memory.isPresent() else None
def get_cpu_cores(self) -> Optional[float]:
"""
Gets the CPU cores for this SlotSharingGroup.
:return: The CPU cores of the SlotSharingGroup.
"""
cpu_cores = self._j_slot_sharing_group.getCpuCores()
return cpu_cores.get() if cpu_cores.isPresent() else None
def get_external_resources(self) -> dict:
"""
Gets the external resource from this SlotSharingGroup.
:return: User specified resources of the SlotSharingGroup.
"""
return dict(self._j_slot_sharing_group.getExternalResources())
def get_java_slot_sharing_group(self):
"""
Gets the Java SlotSharingGroup object.
:return: The Java SlotSharingGroup object.
"""
return self._j_slot_sharing_group
@staticmethod
def builder(name: str) -> 'Builder':
"""
Gets the Builder with the given name for this SlotSharingGroup.
:param name: The name of the SlotSharingGroup.
:return: The builder for the SlotSharingGroup.
"""
return SlotSharingGroup.Builder(
get_gateway().jvm.org.apache.flink.api.common.operators.SlotSharingGroup.newBuilder(
name))
def __eq__(self, other):
return isinstance(other, self.__class__) and \
self._j_slot_sharing_group == other._j_slot_sharing_group
def __hash__(self):
return self._j_slot_sharing_group.hashCode()
class Builder(object):
"""
Builder for the SlotSharingGroup.
"""
def __init__(self, j_builder):
self._j_builder = j_builder
def set_cpu_cores(self, cpu_cores: float) -> 'SlotSharingGroup.Builder':
"""
Sets the CPU cores for this SlotSharingGroup.
:param cpu_cores: The CPU cores of the SlotSharingGroup.
:return: This object.
"""
self._j_builder.setCpuCores(cpu_cores)
return self
def set_task_heap_memory(self, task_heap_memory: MemorySize) -> 'SlotSharingGroup.Builder':
"""
Sets the task heap memory for this SlotSharingGroup.
:param task_heap_memory: The task heap memory of the SlotSharingGroup.
:return: This object.
"""
self._j_builder.setTaskHeapMemory(task_heap_memory.get_java_memory_size())
return self
def set_task_heap_memory_mb(self, task_heap_memory_mb: int) -> 'SlotSharingGroup.Builder':
"""
Sets the task heap memory for this SlotSharingGroup in MB.
:param task_heap_memory_mb: The task heap memory of the SlotSharingGroup in MB.
:return: This object.
"""
self._j_builder.setTaskHeapMemoryMB(task_heap_memory_mb)
return self
def set_task_off_heap_memory(self, task_off_heap_memory: MemorySize) \
-> 'SlotSharingGroup.Builder':
"""
Sets the task off-heap memory for this SlotSharingGroup.
:param task_off_heap_memory: The task off-heap memory of the SlotSharingGroup.
:return: This object.
"""
self._j_builder.setTaskOffHeapMemory(task_off_heap_memory.get_java_memory_size())
return self
def set_task_off_heap_memory_mb(self, task_off_heap_memory_mb: int) \
-> 'SlotSharingGroup.Builder':
"""
Sets the task off-heap memory for this SlotSharingGroup in MB.
:param task_off_heap_memory_mb: The task off-heap memory of the SlotSharingGroup in MB.
:return: This object.
"""
self._j_builder.setTaskOffHeapMemoryMB(task_off_heap_memory_mb)
return self
def set_managed_memory(self, managed_memory: MemorySize) -> 'SlotSharingGroup.Builder':
"""
Sets the task managed memory for this SlotSharingGroup.
:param managed_memory: The task managed memory of the SlotSharingGroup.
:return: This object.
"""
self._j_builder.setManagedMemory(managed_memory.get_java_memory_size())
return self
def set_managed_memory_mb(self, managed_memory_mb: int) -> 'SlotSharingGroup.Builder':
"""
Sets the task managed memory for this SlotSharingGroup in MB.
:param managed_memory_mb: The task managed memory of the SlotSharingGroup in MB.
:return: This object.
"""
self._j_builder.setManagedMemoryMB(managed_memory_mb)
return self
def set_external_resource(self, name: str, value: float) -> 'SlotSharingGroup.Builder':
"""
Adds the given external resource. The old value with the same resource name will be
replaced if present.
:param name: The resource name of the given external resource.
:param value: The value of the given external resource.
:return: This object.
"""
self._j_builder.setExternalResource(name, value)
return self
def build(self) -> 'SlotSharingGroup':
"""
Builds the SlotSharingGroup.
:return: The SlotSharingGroup object.
"""
return SlotSharingGroup(j_slot_sharing_group=self._j_builder.build())
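# Hedged usage sketch (illustrative, not part of the original module): declaring a slot sharing
# group with explicit resources via the builder above. The group name "ssg-example" is arbitrary;
# CPU cores and task heap memory should both be set when any fine-grained resource is specified.
def _example_slot_sharing_group():
    return SlotSharingGroup.builder("ssg-example") \
        .set_cpu_cores(1.0) \
        .set_task_heap_memory_mb(256) \
        .set_managed_memory(MemorySize.of_mebi_bytes(128)) \
        .build()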
| 10,515 | 34.768707 | 99 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/time_characteristic.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from enum import Enum
from pyflink.java_gateway import get_gateway
__all__ = ['TimeCharacteristic']
class TimeCharacteristic(Enum):
"""
The time characteristic defines how the system determines time for time-dependent
order and operations that depend on time (such as time windows).
:data:`ProcessingTime`:
Processing time for operators means that the operator uses the system clock of the machine
to determine the current time of the data stream. Processing-time windows trigger based
on wall-clock time and include whatever elements happen to have arrived at the operator at
that point in time.
Using processing time for window operations results in general in quite non-deterministic
results, because the contents of the windows depends on the speed in which elements arrive.
It is, however, the cheapest method of forming windows and the method that introduces the
least latency.
:data:`IngestionTime`:
Ingestion time means that the time of each individual element in the stream is determined
when the element enters the Flink streaming data flow. Operations like windows group the
elements based on that time, meaning that processing speed within the streaming dataflow
does not affect windowing, but only the speed at which sources receive elements.
Ingestion time is often a good compromise between processing time and event time.
It does not need any special manual form of watermark generation, and events are typically
    not too much out-of-order when they arrive at operators; in fact, out-of-orderness can
only be introduced by streaming shuffles or split/join/union operations. The fact that
elements are not very much out-of-order means that the latency increase is moderate,
compared to event time.
:data:`EventTime`:
Event time means that the time of each individual element in the stream (also called event)
is determined by the event's individual custom timestamp. These timestamps either exist in
the elements from before they entered the Flink streaming dataflow, or are user-assigned at
the sources. The big implication of this is that it allows for elements to arrive in the
sources and in all operators out of order, meaning that elements with earlier timestamps may
arrive after elements with later timestamps.
Operators that window or order data with respect to event time must buffer data until they
can be sure that all timestamps for a certain time interval have been received. This is
handled by the so called "time watermarks".
Operations based on event time are very predictable - the result of windowing operations
is typically identical no matter when the window is executed and how fast the streams
operate. At the same time, the buffering and tracking of event time is also costlier than
operating with processing time, and typically also introduces more latency. The amount of
extra cost depends mostly on how much out of order the elements arrive, i.e., how long the
time span between the arrival of early and late elements is. With respect to the
"time watermarks", this means that the cost typically depends on how early or late the
watermarks can be generated for their timestamp.
    In relation to :data:`IngestionTime`, the event time is similar, but refers to the event's
original time, rather than the time assigned at the data source. Practically, that means
that event time has generally more meaning, but also that it takes longer to determine
that all elements for a certain time have arrived.
"""
ProcessingTime = 0
IngestionTime = 1
EventTime = 2
@staticmethod
def _from_j_time_characteristic(j_time_characteristic) -> 'TimeCharacteristic':
return TimeCharacteristic[j_time_characteristic.name()]
def _to_j_time_characteristic(self):
gateway = get_gateway()
JTimeCharacteristic = gateway.jvm.org.apache.flink.streaming.api.TimeCharacteristic
return getattr(JTimeCharacteristic, self.name)
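# Hedged usage sketch (illustrative, not part of the original module): explicitly selecting event
# time for a job, assuming `set_stream_time_characteristic` is still available on the execution
# environment (the setter is deprecated since event time became the default in Flink 1.12).
def _example_time_characteristic():
    from pyflink.datastream import StreamExecutionEnvironment
    env = StreamExecutionEnvironment.get_execution_environment()
    env.set_stream_time_characteristic(TimeCharacteristic.EventTime)
    return env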
| 5,047 | 51.583333 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
Entry point classes of Flink DataStream API:
- :class:`StreamExecutionEnvironment`:
The context in which a streaming program is executed.
- :class:`DataStream`:
Represents a stream of elements of the same type. A DataStream can be transformed
into another DataStream by applying a transformation.
- :class:`KeyedStream`:
Represents a :class:`DataStream` where elements are partitioned by key using a
provided KeySelector.
- :class:`WindowedStream`:
Represents a data stream where elements are grouped by key, and for each
key, the stream of elements is split into windows based on a WindowAssigner. Window emission
is triggered based on a Trigger.
- :class:`ConnectedStreams`:
      Represents two connected streams of (possibly) different data types. Connected
      streams are useful for cases where operations on one stream directly affect the operations on
      the other stream, usually via shared state between the streams.
    - :class:`BroadcastStream`:
      Represents a stream with :class:`state.BroadcastState` (s).
    - :class:`BroadcastConnectedStream`:
      Represents the result of connecting a keyed or non-keyed stream, with a
      :class:`BroadcastStream` with :class:`state.BroadcastState` (s).
Functions used to transform a :class:`DataStream` into another :class:`DataStream`:
- :class:`MapFunction`:
Performs a map transformation of a :class:`DataStream` at element wise.
- :class:`CoMapFunction`:
Performs a map transformation over two connected streams.
- :class:`FlatMapFunction`:
Performs a flatmap transformation of a :class:`DataStream` which produces zero, one, or more
elements for each input element.
- :class:`CoFlatMapFunction`:
Performs a flatmap transformation over two connected streams.
- :class:`FilterFunction`:
A filter function is a predicate applied individually to each record.
- :class:`ReduceFunction`:
Combines groups of elements to a single value.
    - :class:`ProcessFunction`:
      Similar to :class:`FlatMapFunction`, except that it can access the current timestamp and
      watermark in :class:`ProcessFunction`.
    - :class:`KeyedProcessFunction`:
      Similar to :class:`ProcessFunction`, except that it is applied to a :class:`KeyedStream` and
      can register event-time and processing-time timers.
    - :class:`CoProcessFunction`:
      Similar to :class:`CoFlatMapFunction`, except that it can access the current timestamp and
      watermark in :class:`CoProcessFunction`.
    - :class:`KeyedCoProcessFunction`:
      Similar to :class:`CoProcessFunction`, except that it is applied to a keyed
      :class:`ConnectedStreams` and can register event-time and processing-time timers.
- :class:`WindowFunction`:
Base interface for functions that are evaluated over keyed (grouped) windows.
- :class:`ProcessWindowFunction`:
Similar to :class:`WindowFunction`, except that it could access a context for retrieving extra
information such as the current timestamp, the watermark, etc.
- :class:`AggregateFunction`:
Base class for a user-defined aggregate function.
- :class:`BroadcastProcessFunction`:
A function to be applied to a :class:`BroadcastConnectedStream` that connects
:class:`BroadcastStream`, i.e. a stream with broadcast state, with a non-keyed
:class:`DataStream`.
- :class:`KeyedBroadcastProcessFunction`:
A function to be applied to a :class:`BroadcastConnectedStream` that connects
:class:`BroadcastStream`, i.e. a stream with broadcast state, with a :class:`KeyedStream`.
- :class:`RuntimeContext`:
Contains information about the context in which functions are executed. Each
parallel instance of the function will have a context through which it can access static
contextual information (such as the current parallelism), etc.
Classes to define window:
- :class:`Window`:
A grouping of elements into finite buckets.
- :class:`TimeWindow`:
A grouping of elements according to a time interval from start (inclusive) to end (exclusive).
- :class:`CountWindow`:
A grouping of elements according to element count from start (inclusive) to end (exclusive).
- :class:`GlobalWindow`:
The window into which all data is placed.
- :class:`WindowAssigner`:
Assigns zero or more :class:`Window` to an element.
- :class:`MergingWindowAssigner`:
A :class:`WindowAssigner` that can merge windows.
- :class:`TriggerResult`:
Result type for trigger methods. This determines what happens with the window, for example
whether the window function should be called, or the window should be discarded.
- :class:`Trigger`:
Determines when a pane of a window should be evaluated to emit the results for that
part of the window.
Classes to define the behavior of checkpoint and state backend:
- :class:`CheckpointingMode`:
Defines what consistency guarantees the system gives in the presence of failures.
- :class:`CheckpointConfig`:
Configuration that captures all checkpointing related settings.
- :class:`StateBackend`:
Base class of the state backends which define how the state of a streaming application is
stored locally within the cluster. Different state backends store their state in different
fashions, and use different data structures to hold the state of a running application.
- :class:`HashMapStateBackend`:
Holds the working state in the memory (JVM heap) of the TaskManagers and
checkpoints based on the configured :class:`CheckpointStorage`.
- :class:`EmbeddedRocksDBStateBackend`:
Stores its state in an embedded `RocksDB` instance. This state backend can store very large
state that exceeds memory and spills to local disk.
- :class:`CustomStateBackend`:
A wrapper of customized java state backend.
- :class:`JobManagerCheckpointStorage`:
Checkpoints state directly to the JobManager's memory (hence the name), but savepoints will
be persisted to a file system.
- :class:`FileSystemCheckpointStorage`:
      Checkpoints state as files to a file system. Each checkpoint will individually store all its
files in a subdirectory that includes the checkpoint number, such as
`hdfs://namenode:port/flink-checkpoints/chk-17/`.
- :class:`CustomCheckpointStorage`:
A wrapper of customized java checkpoint storage.
Classes for state operations:
- :class:`state.ValueState`:
Interface for partitioned single-value state. The value can be retrieved or updated.
- :class:`state.ListState`:
Interface for partitioned list state in Operations. The state is accessed and modified by
user functions, and checkpointed consistently by the system as part of the distributed
snapshots.
- :class:`state.MapState`:
Interface for partitioned key-value state. The key-value pair can be added, updated and
retrieved.
- :class:`state.ReducingState`:
Interface for reducing state. Elements can be added to the state, they will be combined using
a :class:`ReduceFunction`. The current state can be inspected.
- :class:`state.AggregatingState`:
Interface for aggregating state, based on an :class:`AggregateFunction`. Elements that are
added to this type of state will be eagerly pre-aggregated using a given AggregateFunction.
- :class:`state.BroadcastState`:
A type of state that can be created to store the state of a :class:`BroadcastStream`. This
state assumes that the same elements are sent to all instances of an operator.
- :class:`state.ReadOnlyBroadcastState`:
A read-only view of the :class:`state.BroadcastState`.
- :class:`state.StateTtlConfig`:
Configuration of state TTL logic.
Classes to define source & sink:
- :class:`connectors.elasticsearch.ElasticsearchSink`:
A sink for publishing data into Elasticsearch 6 or Elasticsearch 7.
- :class:`connectors.kafka.FlinkKafkaConsumer`:
A streaming data source that pulls a parallel data stream from Apache Kafka.
- :class:`connectors.kafka.FlinkKafkaProducer`:
A streaming data sink to produce data into a Kafka topic.
- :class:`connectors.kafka.KafkaSource`:
The new API to read data in parallel from Apache Kafka.
- :class:`connectors.kafka.KafkaSink`:
The new API to write data into to Apache Kafka topics.
- :class:`connectors.file_system.FileSource`:
A unified data source that reads files - both in batch and in streaming mode.
This source supports all (distributed) file systems and object stores that can be accessed via
      Flink's FileSystem class.
- :class:`connectors.file_system.FileSink`:
A unified sink that emits its input elements to FileSystem files within buckets. This
sink achieves exactly-once semantics for both BATCH and STREAMING.
- :class:`connectors.file_system.StreamingFileSink`:
Sink that emits its input elements to files within buckets. This is integrated with the
checkpointing mechanism to provide exactly once semantics.
- :class:`connectors.number_seq.NumberSequenceSource`:
A data source that produces a sequence of numbers (longs). This source is useful for testing
and for cases that just need a stream of N events of any kind.
- :class:`connectors.jdbc.JdbcSink`:
A data sink to produce data into an external storage using JDBC.
- :class:`connectors.pulsar.PulsarSource`:
A streaming data source that pulls a parallel data stream from Pulsar.
- :class:`connectors.pulsar.PulsarSink`:
A streaming data sink to produce data into Pulsar.
- :class:`connectors.rabbitmq.RMQSource`:
A streaming data source that pulls a parallel data stream from RabbitMQ.
- :class:`connectors.rabbitmq.RMQSink`:
A Sink for publishing data into RabbitMQ.
- :class:`connectors.cassandra.CassandraSink`:
A Sink for publishing data into Cassandra.
- :class:`connectors.kinesis.FlinkKinesisConsumer`:
A streaming data source that pulls a parallel data stream from Kinesis.
- :class:`connectors.kinesis.KinesisStreamsSink`:
A Kinesis Data Streams (KDS) Sink that performs async requests against a destination stream
using the buffering protocol.
- :class:`connectors.kinesis.KinesisFirehoseSink`:
A Kinesis Data Firehose (KDF) Sink that performs async requests against a destination delivery
stream using the buffering protocol.
- :class:`connectors.hybrid_source.HybridSource`:
A Hybrid source that switches underlying sources based on configured source chain.
Classes to define formats used together with source & sink:
- :class:`formats.csv.CsvReaderFormat`:
A :class:`~connectors.file_system.StreamFormat` to read CSV files into Row data.
- :class:`formats.csv.CsvBulkWriter`:
Creates :class:`~pyflink.common.serialization.BulkWriterFactory` to write Row data into CSV
files.
- :class:`formats.avro.GenericRecordAvroTypeInfo`:
A :class:`~pyflink.common.typeinfo.TypeInformation` to indicate vanilla Python records will be
translated to GenericRecordAvroTypeInfo on the Java side.
- :class:`formats.avro.AvroInputFormat`:
An InputFormat to read avro files in a streaming fashion.
- :class:`formats.avro.AvroWriters`:
A class to provide :class:`~pyflink.common.serialization.BulkWriterFactory` to write vanilla
Python objects into avro files in a batch fashion.
- :class:`formats.parquet.ParquetColumnarRowInputFormat`:
A :class:`~connectors.file_system.BulkFormat` to read columnar parquet files into Row data in
a batch-processing fashion.
- :class:`formats.parquet.ParquetBulkWriters`:
Convenient builder to create a :class:`~pyflink.common.serialization.BulkWriterFactory` that
writes Rows with a defined RowType into Parquet files in a batch fashion.
- :class:`formats.parquet.AvroParquetReaders`:
      A convenience builder to create a reader format that reads individual Avro records from a
Parquet stream. Only GenericRecord is supported in PyFlink.
- :class:`formats.parquet.AvroParquetWriters`:
Convenience builder to create ParquetWriterFactory instances for Avro types. Only
GenericRecord is supported in PyFlink.
- :class:`formats.orc.OrcBulkWriters`:
Convenient builder to create a :class:`~pyflink.common.serialization.BulkWriterFactory` that
writes Row records with a defined :class:`RowType` into Orc files.
Other important classes:
- :class:`TimeCharacteristic`:
Defines how the system determines time for time-dependent order and operations that depend
on time (such as time windows).
- :class:`TimeDomain`:
Specifies whether a firing timer is based on event time or processing time.
- :class:`KeySelector`:
The extractor takes an object and returns the deterministic key for that object.
- :class:`Partitioner`:
Function to implement a custom partition assignment for keys.
- :class:`SinkFunction`:
Interface for implementing user defined sink functionality.
- :class:`SourceFunction`:
Interface for implementing user defined source functionality.
- :class:`OutputTag`:
      A tag with a name and type for identifying the side output of an operator.
"""
from pyflink.datastream.checkpoint_config import CheckpointConfig, ExternalizedCheckpointCleanup
from pyflink.datastream.checkpointing_mode import CheckpointingMode
from pyflink.datastream.data_stream import DataStream, KeyedStream, WindowedStream, \
ConnectedStreams, DataStreamSink, BroadcastStream, BroadcastConnectedStream
from pyflink.datastream.execution_mode import RuntimeExecutionMode
from pyflink.datastream.functions import (MapFunction, CoMapFunction, FlatMapFunction,
CoFlatMapFunction, ReduceFunction, RuntimeContext,
KeySelector, FilterFunction, Partitioner, SourceFunction,
SinkFunction, CoProcessFunction, KeyedProcessFunction,
KeyedCoProcessFunction, AggregateFunction, WindowFunction,
ProcessWindowFunction, BroadcastProcessFunction,
KeyedBroadcastProcessFunction)
from pyflink.datastream.slot_sharing_group import SlotSharingGroup, MemorySize
from pyflink.datastream.state_backend import (StateBackend, MemoryStateBackend, FsStateBackend,
RocksDBStateBackend, CustomStateBackend,
PredefinedOptions, HashMapStateBackend,
EmbeddedRocksDBStateBackend)
from pyflink.datastream.checkpoint_storage import (CheckpointStorage, JobManagerCheckpointStorage,
FileSystemCheckpointStorage,
CustomCheckpointStorage)
from pyflink.datastream.stream_execution_environment import StreamExecutionEnvironment
from pyflink.datastream.time_characteristic import TimeCharacteristic
from pyflink.datastream.time_domain import TimeDomain
from pyflink.datastream.functions import ProcessFunction
from pyflink.datastream.timerservice import TimerService
from pyflink.datastream.window import Window, TimeWindow, CountWindow, WindowAssigner, \
MergingWindowAssigner, TriggerResult, Trigger, GlobalWindow
from pyflink.datastream.output_tag import OutputTag
__all__ = [
'StreamExecutionEnvironment',
'DataStream',
'KeyedStream',
'WindowedStream',
'ConnectedStreams',
'BroadcastStream',
'BroadcastConnectedStream',
'DataStreamSink',
'MapFunction',
'CoMapFunction',
'FlatMapFunction',
'CoFlatMapFunction',
'ReduceFunction',
'FilterFunction',
'ProcessFunction',
'KeyedProcessFunction',
'CoProcessFunction',
'KeyedCoProcessFunction',
'WindowFunction',
'ProcessWindowFunction',
'AggregateFunction',
'BroadcastProcessFunction',
'KeyedBroadcastProcessFunction',
'RuntimeContext',
'TimerService',
'CheckpointingMode',
'CheckpointConfig',
'ExternalizedCheckpointCleanup',
'StateBackend',
'HashMapStateBackend',
'EmbeddedRocksDBStateBackend',
'CustomStateBackend',
'MemoryStateBackend',
'RocksDBStateBackend',
'FsStateBackend',
'PredefinedOptions',
'CheckpointStorage',
'JobManagerCheckpointStorage',
'FileSystemCheckpointStorage',
'CustomCheckpointStorage',
'RuntimeExecutionMode',
'Window',
'TimeWindow',
'CountWindow',
'GlobalWindow',
'WindowAssigner',
'MergingWindowAssigner',
'TriggerResult',
'Trigger',
'TimeCharacteristic',
'TimeDomain',
'KeySelector',
'Partitioner',
'SourceFunction',
'SinkFunction',
'SlotSharingGroup',
'MemorySize',
'OutputTag'
]
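# Hedged usage sketch (illustrative, not part of the public API): a minimal end-to-end DataStream
# program wiring together the entry points exported above. The job name "quickstart" and the
# sample records are arbitrary.
def _example_pipeline():
    env = StreamExecutionEnvironment.get_execution_environment()
    env.set_parallelism(1)
    ds = env.from_collection([(1, 'a'), (2, 'b'), (3, 'c')])
    # Double the numeric field and print the result to the TaskManagers' stdout.
    ds.map(lambda t: (t[0] * 2, t[1])).print()
    env.execute("quickstart")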
| 18,158 | 50.735043 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/orc.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Optional, TYPE_CHECKING
from pyflink.common import Configuration
from pyflink.common.serialization import BulkWriterFactory, RowDataBulkWriterFactory
from pyflink.datastream.utils import create_hadoop_configuration, create_java_properties
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import to_jarray
if TYPE_CHECKING:
from pyflink.table.types import RowType
__all__ = [
'OrcBulkWriters'
]
class OrcBulkWriters(object):
"""
Convenient builder to create a :class:`~pyflink.common.serialization.BulkWriterFactory` that
writes records with a predefined schema into Orc files in a batch fashion.
Example:
::
>>> row_type = DataTypes.ROW([
... DataTypes.FIELD('string', DataTypes.STRING()),
... DataTypes.FIELD('int_array', DataTypes.ARRAY(DataTypes.INT()))
... ])
>>> sink = FileSink.for_bulk_format(
... OUTPUT_DIR, OrcBulkWriters.for_row_type(
... row_type=row_type,
... writer_properties=Configuration(),
... hadoop_config=Configuration(),
... )
... ).build()
>>> ds.sink_to(sink)
.. versionadded:: 1.16.0
"""
@staticmethod
def for_row_type(row_type: 'RowType',
writer_properties: Optional[Configuration] = None,
hadoop_config: Optional[Configuration] = None) \
-> BulkWriterFactory:
"""
Create a :class:`~pyflink.common.serialization.BulkWriterFactory` that writes records
with a predefined schema into Orc files in a batch fashion.
        :param row_type: The RowType of records; it should match the RowTypeInfo of Row records.
:param writer_properties: Orc writer options.
:param hadoop_config: Hadoop configuration.
"""
from pyflink.table.types import RowType
if not isinstance(row_type, RowType):
raise TypeError('row_type must be an instance of RowType')
from pyflink.table.types import _to_java_data_type
j_data_type = _to_java_data_type(row_type)
jvm = get_gateway().jvm
j_row_type = j_data_type.getLogicalType()
orc_types = to_jarray(
jvm.org.apache.flink.table.types.logical.LogicalType,
[i for i in j_row_type.getChildren()]
)
type_description = jvm.org.apache.flink.orc \
.OrcSplitReaderUtil.logicalTypeToOrcType(j_row_type)
if writer_properties is None:
writer_properties = Configuration()
if hadoop_config is None:
hadoop_config = Configuration()
return RowDataBulkWriterFactory(
jvm.org.apache.flink.orc.writer.OrcBulkWriterFactory(
jvm.org.apache.flink.orc.vector.RowDataVectorizer(
type_description.toString(),
orc_types
),
create_java_properties(writer_properties),
create_hadoop_configuration(hadoop_config)
),
row_type
)
| 4,057 | 39.178218 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/avro.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from py4j.java_gateway import get_java_class, JavaObject, java_import
from pyflink.common.io import InputFormat
from pyflink.common.serialization import BulkWriterFactory, SerializationSchema, \
DeserializationSchema
from pyflink.common.typeinfo import TypeInformation
from pyflink.datastream.utils import ResultTypeQueryable
from pyflink.java_gateway import get_gateway
from pyflink.util.java_utils import get_field_value, load_java_class
__all__ = [
'AvroSchema',
'GenericRecordAvroTypeInfo',
'AvroInputFormat',
'AvroBulkWriters',
'AvroRowDeserializationSchema',
'AvroRowSerializationSchema'
]
class AvroSchema(object):
"""
Avro Schema class contains Java org.apache.avro.Schema.
.. versionadded:: 1.16.0
"""
def __init__(self, j_schema):
self._j_schema = j_schema
self._schema_string = None
def __str__(self):
if self._schema_string is None:
self._schema_string = get_field_value(self._j_schema, 'schema').toString()
return self._schema_string
@staticmethod
def parse_string(json_schema: str) -> 'AvroSchema':
"""
Parse JSON string as Avro Schema.
:param json_schema: JSON represented schema string.
:return: the Avro Schema.
"""
JSchema = get_gateway().jvm.org.apache.flink.avro.shaded.org.apache.avro.Schema
return AvroSchema(JSchema.Parser().parse(json_schema))
@staticmethod
def parse_file(file_path: str) -> 'AvroSchema':
"""
Parse a schema definition file as Avro Schema.
:param file_path: path to schema definition file.
:return: the Avro Schema.
"""
jvm = get_gateway().jvm
j_file = jvm.java.io.File(file_path)
JSchema = jvm.org.apache.flink.avro.shaded.org.apache.avro.Schema
return AvroSchema(JSchema.Parser().parse(j_file))
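# Hedged usage sketch (illustrative, not part of the original module): parsing an inline Avro
# schema; the record and field names below are made up for the example.
def _example_avro_schema():
    json_schema = '''
    {
        "type": "record",
        "name": "ExampleRecord",
        "fields": [{"name": "value", "type": "long"}]
    }
    '''
    return AvroSchema.parse_string(json_schema)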
class GenericRecordAvroTypeInfo(TypeInformation):
"""
A :class:`TypeInformation` of Avro's GenericRecord, including the schema. This is a wrapper of
Java org.apache.flink.formats.avro.typeutils.GenericRecordAvroTypeInfo.
Note that this type cannot be used as the type_info of data in
:meth:`StreamExecutionEnvironment.from_collection`.
    .. versionadded:: 1.16.0
"""
def __init__(self, schema: 'AvroSchema'):
super(GenericRecordAvroTypeInfo, self).__init__()
self._schema = schema
self._j_typeinfo = get_gateway().jvm.org.apache.flink.formats.avro.typeutils \
.GenericRecordAvroTypeInfo(schema._j_schema)
def get_java_type_info(self) -> JavaObject:
return self._j_typeinfo
class AvroInputFormat(InputFormat, ResultTypeQueryable):
"""
Provides a FileInputFormat for Avro records.
Example:
::
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> schema = AvroSchema.parse_string(JSON_SCHEMA)
>>> ds = env.create_input(AvroInputFormat(FILE_PATH, schema))
.. versionadded:: 1.16.0
"""
def __init__(self, path: str, schema: 'AvroSchema'):
"""
:param path: The path to Avro data file.
:param schema: The :class:`AvroSchema` of generic record.
"""
jvm = get_gateway().jvm
j_avro_input_format = jvm.org.apache.flink.formats.avro.AvroInputFormat(
jvm.org.apache.flink.core.fs.Path(path),
get_java_class(jvm.org.apache.flink.avro.shaded.org.apache.avro.generic.GenericRecord)
)
super(AvroInputFormat, self).__init__(j_avro_input_format)
self._type_info = GenericRecordAvroTypeInfo(schema)
def get_produced_type(self) -> GenericRecordAvroTypeInfo:
return self._type_info
class AvroBulkWriters(object):
"""
Convenience builder to create :class:`~pyflink.common.serialization.BulkWriterFactory` for
Avro types.
.. versionadded:: 1.16.0
"""
@staticmethod
def for_generic_record(schema: 'AvroSchema') -> 'BulkWriterFactory':
"""
Creates an AvroWriterFactory that accepts and writes Avro generic types. The Avro writers
will use the given schema to build and write the records.
        Note that to make this work in PyFlink, you need to declare the output type of the
predecessor before FileSink to be :class:`~GenericRecordAvroTypeInfo`, and the predecessor
cannot be :meth:`StreamExecutionEnvironment.from_collection`, you can add a pass-through map
function before the sink, as the example shown below.
        The Python data records should match the Avro schema and behave like vanilla Python data
        structures, e.g. an object for an Avro array should behave like a Python list, and an
        object for an Avro map should behave like a Python dict.
Example:
::
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> schema = AvroSchema(JSON_SCHEMA)
>>> avro_type_info = GenericRecordAvroTypeInfo(schema)
>>> ds = env.from_collection([{'array': [1, 2]}], type_info=Types.PICKLED_BYTE_ARRAY())
>>> sink = FileSink.for_bulk_format(
... OUTPUT_DIR, AvroBulkWriters.for_generic_record(schema)).build()
>>> # A map to indicate its Avro type info is necessary for serialization
>>> ds.map(lambda e: e, output_type=GenericRecordAvroTypeInfo(schema)) \\
... .sink_to(sink)
:param schema: The avro schema.
:return: The BulkWriterFactory to write generic records into avro files.
"""
jvm = get_gateway().jvm
j_bulk_writer_factory = jvm.org.apache.flink.formats.avro.AvroWriters.forGenericRecord(
schema._j_schema
)
return BulkWriterFactory(j_bulk_writer_factory)
class AvroRowDeserializationSchema(DeserializationSchema):
"""
Deserialization schema from Avro bytes to Row. Deserializes the byte[] messages into (nested)
Flink rows. It converts Avro types into types that are compatible with Flink's Table & SQL API.
Projects with Avro records containing logical date/time types need to add a JodaTime dependency.
"""
def __init__(self, record_class: str = None, avro_schema_string: str = None):
"""
Creates an Avro deserialization schema for the given specific record class or Avro schema
string. Having the concrete Avro record class might improve performance.
:param record_class: Avro record class used to deserialize Avro's record to Flink's row.
:param avro_schema_string: Avro schema string to deserialize Avro's record to Flink's row.
"""
if avro_schema_string is None and record_class is None:
raise TypeError("record_class or avro_schema_string should be specified.")
j_deserialization_schema = None
if record_class is not None:
gateway = get_gateway()
java_import(gateway.jvm, record_class)
j_record_class = load_java_class(record_class)
JAvroRowDeserializationSchema = get_gateway().jvm \
.org.apache.flink.formats.avro.AvroRowDeserializationSchema
j_deserialization_schema = JAvroRowDeserializationSchema(j_record_class)
elif avro_schema_string is not None:
JAvroRowDeserializationSchema = get_gateway().jvm \
.org.apache.flink.formats.avro.AvroRowDeserializationSchema
j_deserialization_schema = JAvroRowDeserializationSchema(avro_schema_string)
super(AvroRowDeserializationSchema, self).__init__(j_deserialization_schema)
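# Hedged usage sketch (illustrative, not part of the original module): building a deserialization
# schema from an Avro schema string; the schema below is made up for the example.
def _example_avro_row_deserialization_schema():
    json_schema = '''
    {
        "type": "record",
        "name": "ExampleRecord",
        "fields": [{"name": "name", "type": "string"}]
    }
    '''
    return AvroRowDeserializationSchema(avro_schema_string=json_schema)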
class AvroRowSerializationSchema(SerializationSchema):
"""
Serialization schema that serializes to Avro binary format.
"""
def __init__(self, record_class: str = None, avro_schema_string: str = None):
"""
        Creates an AvroRowSerializationSchema that serializes a SpecificRecord using the provided
        schema or record class.
:param record_class: Avro record class used to serialize Flink's row to Avro's record.
:param avro_schema_string: Avro schema string to serialize Flink's row to Avro's record.
"""
if avro_schema_string is None and record_class is None:
raise TypeError("record_class or avro_schema_string should be specified.")
j_serialization_schema = None
if record_class is not None:
gateway = get_gateway()
java_import(gateway.jvm, record_class)
j_record_class = load_java_class(record_class)
JAvroRowSerializationSchema = get_gateway().jvm \
.org.apache.flink.formats.avro.AvroRowSerializationSchema
j_serialization_schema = JAvroRowSerializationSchema(j_record_class)
elif avro_schema_string is not None:
JAvroRowSerializationSchema = get_gateway().jvm \
.org.apache.flink.formats.avro.AvroRowSerializationSchema
j_serialization_schema = JAvroRowSerializationSchema(avro_schema_string)
super(AvroRowSerializationSchema, self).__init__(j_serialization_schema)
| 10,081 | 40.319672 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/parquet.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Optional, TYPE_CHECKING
from pyflink.common import Configuration
from pyflink.common.serialization import BulkWriterFactory, RowDataBulkWriterFactory
from pyflink.datastream.connectors.file_system import StreamFormat, BulkFormat
from pyflink.datastream.formats.avro import AvroSchema
from pyflink.datastream.utils import create_hadoop_configuration
from pyflink.java_gateway import get_gateway
if TYPE_CHECKING:
from pyflink.table.types import RowType
__all__ = [
'AvroParquetReaders',
'AvroParquetWriters',
'ParquetColumnarRowInputFormat',
'ParquetBulkWriters'
]
class AvroParquetReaders(object):
"""
    A convenience builder to create a reader format that reads individual Avro records from a
Parquet stream. Only GenericRecord is supported in PyFlink.
.. versionadded:: 1.16.0
"""
@staticmethod
def for_generic_record(schema: 'AvroSchema') -> 'StreamFormat':
"""
Creates a new AvroParquetRecordFormat that reads the parquet file into Avro GenericRecords.
To read into GenericRecords, this method needs an Avro Schema. That is because Flink needs
to be able to serialize the results in its data flow, which is very inefficient without the
schema. And while the Schema is stored in the Avro file header, Flink needs this schema
during 'pre-flight' time when the data flow is set up and wired, which is before there is
access to the files.
Example:
::
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> schema = AvroSchema.parse_string(JSON_SCHEMA)
>>> source = FileSource.for_record_stream_format(
... AvroParquetReaders.for_generic_record(schema),
... PARQUET_FILE_PATH
... ).build()
>>> ds = env.from_source(source, WatermarkStrategy.no_watermarks(), "parquet-source")
:param schema: the Avro Schema.
:return: StreamFormat for reading Avro GenericRecords.
"""
jvm = get_gateway().jvm
JAvroParquetReaders = jvm.org.apache.flink.formats.parquet.avro.AvroParquetReaders
return StreamFormat(JAvroParquetReaders.forGenericRecord(schema._j_schema))
class AvroParquetWriters(object):
"""
Convenient builder to create Parquet BulkWriterFactory instances for Avro types.
Only GenericRecord is supported at present.
.. versionadded:: 1.16.0
"""
@staticmethod
def for_generic_record(schema: 'AvroSchema') -> 'BulkWriterFactory':
"""
Creates a ParquetWriterFactory that accepts and writes Avro generic types. The Parquet
writers will use the given schema to build and write the columnar data.
        Note that to make this work in PyFlink, you need to declare the output type of the
predecessor before FileSink to be :class:`GenericRecordAvroTypeInfo`, and the predecessor
cannot be :meth:`StreamExecutionEnvironment.from_collection`, you can add a pass-through map
function before the sink, as the example shown below.
        The Python data records should match the Avro schema and behave like vanilla Python data
        structures, e.g. an object for an Avro array should behave like a Python list, and an
        object for an Avro map should behave like a Python dict.
Example:
::
>>> env = StreamExecutionEnvironment.get_execution_environment()
>>> schema = AvroSchema(JSON_SCHEMA)
>>> avro_type_info = GenericRecordAvroTypeInfo(schema)
>>> ds = env.from_collection([{'array': [1, 2]}], type_info=Types.PICKLED_BYTE_ARRAY())
>>> sink = FileSink.for_bulk_format(
... OUTPUT_DIR, AvroParquetWriters.for_generic_record(schema)).build()
>>> # A map to indicate its Avro type info is necessary for serialization
>>> ds.map(lambda e: e, output_type=GenericRecordAvroTypeInfo(schema)) \\
... .sink_to(sink)
:param schema: The avro schema.
:return: The BulkWriterFactory to write generic records into parquet files.
"""
jvm = get_gateway().jvm
JAvroParquetWriters = jvm.org.apache.flink.formats.parquet.avro.AvroParquetWriters
return BulkWriterFactory(JAvroParquetWriters.forGenericRecord(schema._j_schema))
class ParquetColumnarRowInputFormat(BulkFormat):
"""
    A ParquetVectorizedInputFormat that provides a RowData iterator, using ColumnarRowData to
    expose a row view of each column batch. Only **primitive** column types are supported;
    composite types such as array and map are not.
Example:
::
>>> row_type = DataTypes.ROW([
... DataTypes.FIELD('a', DataTypes.INT()),
... DataTypes.FIELD('b', DataTypes.STRING()),
... ])
>>> source = FileSource.for_bulk_file_format(ParquetColumnarRowInputFormat(
... row_type=row_type,
... hadoop_config=Configuration(),
... batch_size=2048,
... is_utc_timestamp=False,
... is_case_sensitive=True,
... ), PARQUET_FILE_PATH).build()
>>> ds = env.from_source(source, WatermarkStrategy.no_watermarks(), "parquet-source")
.. versionadded:: 1.16.0
"""
def __init__(self,
row_type: 'RowType',
hadoop_config: Optional[Configuration] = None,
batch_size: int = 2048,
is_utc_timestamp: bool = False,
is_case_sensitive: bool = True):
if not hadoop_config:
hadoop_config = Configuration()
from pyflink.table.types import _to_java_data_type
jvm = get_gateway().jvm
j_row_type = _to_java_data_type(row_type).getLogicalType()
produced_type_info = jvm.org.apache.flink.table.runtime.typeutils. \
InternalTypeInfo.of(j_row_type)
j_parquet_columnar_format = jvm.org.apache.flink.formats.parquet. \
ParquetColumnarRowInputFormat(create_hadoop_configuration(hadoop_config),
j_row_type, produced_type_info, batch_size,
is_utc_timestamp, is_case_sensitive)
super().__init__(j_parquet_columnar_format)
class ParquetBulkWriters(object):
"""
Convenient builder to create a :class:`~pyflink.common.serialization.BulkWriterFactory` that
writes records with a predefined schema into Parquet files in a batch fashion.
Example:
::
>>> row_type = DataTypes.ROW([
... DataTypes.FIELD('string', DataTypes.STRING()),
... DataTypes.FIELD('int_array', DataTypes.ARRAY(DataTypes.INT()))
... ])
>>> sink = FileSink.for_bulk_format(
... OUTPUT_DIR, ParquetBulkWriters.for_row_type(
... row_type,
... hadoop_config=Configuration(),
... utc_timestamp=True,
... )
... ).build()
>>> ds.sink_to(sink)
.. versionadded:: 1.16.0
"""
@staticmethod
def for_row_type(row_type: 'RowType',
hadoop_config: Optional[Configuration] = None,
utc_timestamp: bool = False) -> 'BulkWriterFactory':
"""
Create a :class:`~pyflink.common.serialization.BulkWriterFactory` that writes records
with a predefined schema into Parquet files in a batch fashion.
:param row_type: The RowType of records, it should match the RowTypeInfo of Row records.
:param hadoop_config: Hadoop configuration.
        :param utc_timestamp: Use UTC timezone or local timezone for the conversion between epoch
            time and LocalDateTime. Hive 0.x/1.x/2.x use the local timezone, but Hive 3.x uses
            the UTC timezone.
"""
if not hadoop_config:
hadoop_config = Configuration()
from pyflink.table.types import _to_java_data_type
jvm = get_gateway().jvm
JParquetRowDataBuilder = jvm.org.apache.flink.formats.parquet.row.ParquetRowDataBuilder
return RowDataBulkWriterFactory(JParquetRowDataBuilder.createWriterFactory(
_to_java_data_type(row_type).getLogicalType(),
create_hadoop_configuration(hadoop_config),
utc_timestamp
), row_type)
| 9,309 | 42.302326 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/json.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.common import SerializationSchema, TypeInformation, typeinfo, DeserializationSchema
from pyflink.java_gateway import get_gateway
__all__ = [
'JsonRowDeserializationSchema',
'JsonRowSerializationSchema'
]
class JsonRowDeserializationSchema(DeserializationSchema):
"""
Deserialization schema from JSON to Flink types.
Deserializes a byte[] message as a JSON object and reads the specified fields.
Failures during deserialization are forwarded as wrapped IOExceptions.
"""
def __init__(self, j_deserialization_schema):
super(JsonRowDeserializationSchema, self).__init__(j_deserialization_schema)
@staticmethod
def builder():
"""
A static method to get a Builder for JsonRowDeserializationSchema.
"""
return JsonRowDeserializationSchema.Builder()
class Builder(object):
"""
Builder for JsonRowDeserializationSchema.
"""
def __init__(self):
self._type_info = None
self._fail_on_missing_field = False
self._ignore_parse_errors = False
def type_info(self, type_info: TypeInformation):
"""
Creates a JSON deserialization schema for the given type information.
:param type_info: Type information describing the result type. The field names of Row
are used to parse the JSON properties.
"""
self._type_info = type_info
return self
def json_schema(self, json_schema: str):
"""
Creates a JSON deserialization schema for the given JSON schema.
:param json_schema: JSON schema describing the result type.
"""
if json_schema is None:
raise TypeError("The json_schema must not be None.")
j_type_info = get_gateway().jvm \
.org.apache.flink.formats.json.JsonRowSchemaConverter.convert(json_schema)
self._type_info = typeinfo._from_java_type(j_type_info)
return self
def fail_on_missing_field(self):
"""
Configures schema to fail if a JSON field is missing. A missing field is ignored and the
field is set to null by default.
"""
self._fail_on_missing_field = True
return self
def ignore_parse_errors(self):
"""
            Configures schema to skip fields and rows with JSON parse errors instead of failing;
            by default an exception is thrown when parsing JSON fails.
"""
self._ignore_parse_errors = True
return self
def build(self):
JBuilder = get_gateway().jvm.org.apache.flink.formats.json.JsonRowDeserializationSchema\
.Builder
j_builder = JBuilder(self._type_info.get_java_type_info())
if self._fail_on_missing_field:
j_builder = j_builder.failOnMissingField()
if self._ignore_parse_errors:
j_builder = j_builder.ignoreParseErrors()
j_deserialization_schema = j_builder.build()
return JsonRowDeserializationSchema(j_deserialization_schema=j_deserialization_schema)
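# --- Editor's illustrative sketch (not part of the PyFlink API surface) ---
# A minimal example of how the builder above is typically used: the Row type
# information supplies the field names that are matched against JSON
# properties. The helper name and the field layout are arbitrary, and handing
# the schema to a connector (e.g. a Kafka source) is assumed rather than shown.
def _example_json_row_deserialization_schema():
    from pyflink.common import Types
    row_type_info = Types.ROW_NAMED(['id', 'name'], [Types.INT(), Types.STRING()])
    return JsonRowDeserializationSchema.builder() \
        .type_info(row_type_info) \
        .ignore_parse_errors() \
        .build()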
class JsonRowSerializationSchema(SerializationSchema):
"""
    Serialization schema that serializes an object of Flink types into JSON bytes. The input
    Flink object is serialized into a JSON string and converted into a byte[].
    The resulting byte[] message can be deserialized using JsonRowDeserializationSchema.
"""
def __init__(self, j_serialization_schema):
super(JsonRowSerializationSchema, self).__init__(j_serialization_schema)
@staticmethod
def builder():
return JsonRowSerializationSchema.Builder()
class Builder(object):
"""
Builder for JsonRowSerializationSchema.
"""
def __init__(self):
self._type_info = None
def with_type_info(self, type_info: TypeInformation):
"""
Creates a JSON serialization schema for the given type information.
:param type_info: Type information describing the result type. The field names of Row
are used to parse the JSON properties.
"""
self._type_info = type_info
return self
def build(self):
if self._type_info is None:
raise TypeError("Typeinfo should be set.")
j_builder = get_gateway().jvm \
.org.apache.flink.formats.json.JsonRowSerializationSchema.builder()
j_schema = j_builder.withTypeInfo(self._type_info.get_java_type_info()).build()
return JsonRowSerializationSchema(j_serialization_schema=j_schema)
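# --- Editor's illustrative sketch (not part of the PyFlink API surface) ---
# The serialization builder mirrors the deserialization one: with_type_info()
# drives the JSON property names. Passing the result to a sink connector is
# assumed and not shown; the helper name and field layout are arbitrary.
def _example_json_row_serialization_schema():
    from pyflink.common import Types
    row_type_info = Types.ROW_NAMED(['id', 'name'], [Types.INT(), Types.STRING()])
    return JsonRowSerializationSchema.builder() \
        .with_type_info(row_type_info) \
        .build()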
| 5,720 | 36.887417 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/csv.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Optional, TYPE_CHECKING
from pyflink.common.serialization import BulkWriterFactory, RowDataBulkWriterFactory, \
SerializationSchema, DeserializationSchema
from pyflink.common.typeinfo import TypeInformation
from pyflink.datastream.connectors.file_system import StreamFormat
from pyflink.java_gateway import get_gateway
if TYPE_CHECKING:
from pyflink.table.types import DataType, RowType, NumericType
__all__ = [
'CsvSchema',
'CsvSchemaBuilder',
'CsvReaderFormat',
'CsvBulkWriters',
'CsvRowDeserializationSchema',
'CsvRowSerializationSchema'
]
class CsvSchema(object):
"""
CsvSchema holds schema information of a csv file, corresponding to Java
``com.fasterxml.jackson.dataformat.csv.CsvSchema`` class.
.. versionadded:: 1.16.0
"""
def __init__(self, j_schema, row_type: 'RowType'):
self._j_schema = j_schema
self._row_type = row_type
self._type_info = None
@staticmethod
def builder() -> 'CsvSchemaBuilder':
"""
Returns a :class:`CsvSchemaBuilder`.
"""
return CsvSchemaBuilder()
def size(self):
return self._j_schema.size()
def __len__(self):
return self.size()
def __str__(self):
return self._j_schema.toString()
def __repr__(self):
return str(self)
class CsvSchemaBuilder(object):
"""
CsvSchemaBuilder is for building a :class:`~CsvSchema`, corresponding to Java
``com.fasterxml.jackson.dataformat.csv.CsvSchema.Builder`` class.
.. versionadded:: 1.16.0
"""
def __init__(self):
jvm = get_gateway().jvm
self._j_schema_builder = jvm.org.apache.flink.shaded.jackson2.com.fasterxml.jackson \
.dataformat.csv.CsvSchema.builder()
self._fields = []
def build(self) -> 'CsvSchema':
"""
Build the :class:`~CsvSchema`.
"""
from pyflink.table.types import DataTypes
return CsvSchema(self._j_schema_builder.build(), DataTypes.ROW(self._fields))
def add_array_column(self,
name: str,
separator: str = ';',
element_type: Optional['DataType'] = None) \
-> 'CsvSchemaBuilder':
"""
        Add an array column to the schema. The element type can be specified via
        ``element_type`` and should be a primitive type.
:param name: Name of the column.
:param separator: Text separator of array elements, default to ``;``.
:param element_type: DataType of array elements, default to ``DataTypes.STRING()``.
"""
from pyflink.table.types import DataTypes
if element_type is None:
element_type = DataTypes.STRING()
self._j_schema_builder.addArrayColumn(name, separator)
self._fields.append(DataTypes.FIELD(name, DataTypes.ARRAY(element_type)))
return self
def add_boolean_column(self, name: str) -> 'CsvSchemaBuilder':
"""
        Add a boolean column to the schema, with type ``DataTypes.BOOLEAN()``.
:param name: Name of the column.
"""
from pyflink.table.types import DataTypes
self._j_schema_builder.addBooleanColumn(name)
self._fields.append(DataTypes.FIELD(name, DataTypes.BOOLEAN()))
return self
def add_number_column(self, name: str,
number_type: Optional['NumericType'] = None) \
-> 'CsvSchemaBuilder':
"""
        Add a number column to the schema. The numeric type can be specified via ``number_type``.
:param name: Name of the column.
:param number_type: DataType of the number, default to ``DataTypes.BIGINT()``.
"""
from pyflink.table.types import DataTypes
if number_type is None:
number_type = DataTypes.BIGINT()
self._j_schema_builder.addNumberColumn(name)
self._fields.append(DataTypes.FIELD(name, number_type))
return self
def add_string_column(self, name: str) -> 'CsvSchemaBuilder':
"""
        Add a string column to the schema, with type ``DataTypes.STRING()``.
:param name: Name of the column.
"""
from pyflink.table.types import DataTypes
self._j_schema_builder.addColumn(name)
self._fields.append(DataTypes.FIELD(name, DataTypes.STRING()))
return self
def add_columns_from(self, schema: 'CsvSchema') -> 'CsvSchemaBuilder':
"""
        Add all columns in ``schema`` to the current schema.
:param schema: Another :class:`CsvSchema`.
"""
self._j_schema_builder.addColumnsFrom(schema._j_schema)
for field in schema._row_type:
self._fields.append(field)
return self
def clear_columns(self):
"""
Delete all columns in the schema.
"""
self._j_schema_builder.clearColumns()
self._fields.clear()
return self
def set_allow_comments(self, allow: bool = True):
"""
Allow using ``#`` prefixed comments in csv file.
"""
self._j_schema_builder.setAllowComments(allow)
return self
def set_any_property_name(self, name: str):
self._j_schema_builder.setAnyPropertyName(name)
return self
def disable_array_element_separator(self):
"""
Set array element separator to ``""``.
"""
self._j_schema_builder.disableArrayElementSeparator()
return self
def remove_array_element_separator(self, index: int):
"""
Set array element separator of a column specified by ``index`` to ``""``.
"""
self._j_schema_builder.removeArrayElementSeparator(index)
return self
def set_array_element_separator(self, separator: str):
"""
Set global array element separator, default to ``;``.
"""
self._j_schema_builder.setArrayElementSeparator(separator)
return self
def set_column_separator(self, char: str):
"""
Set column separator, ``char`` should be a single char, default to ``,``.
"""
if len(char) != 1:
raise ValueError('Column separator must be a single char, got {}'.format(char))
self._j_schema_builder.setColumnSeparator(char)
return self
def disable_escape_char(self):
"""
Disable escaping in csv file.
"""
self._j_schema_builder.disableEscapeChar()
return self
def set_escape_char(self, char: str):
"""
Set escape char, ``char`` should be a single char, default to no-escaping.
"""
if len(char) != 1:
raise ValueError('Escape char must be a single char, got {}'.format(char))
self._j_schema_builder.setEscapeChar(char)
return self
def set_line_separator(self, separator: str):
"""
        Set the line separator, default to ``\\n``. This is only configurable for writing; for
        reading, ``\\n``, ``\\r`` and ``\\r\\n`` are all recognized.
"""
self._j_schema_builder.setLineSeparator(separator)
return self
def set_null_value(self, null_value: str):
"""
Set literal for null value, default to empty sequence.
"""
        self._j_schema_builder.setNullValue(null_value)
        return self
def disable_quote_char(self):
"""
Disable quote char.
"""
self._j_schema_builder.disableQuoteChar()
return self
def set_quote_char(self, char: str):
"""
Set quote char, default to ``"``.
"""
if len(char) != 1:
raise ValueError('Quote char must be a single char, got {}'.format(char))
self._j_schema_builder.setQuoteChar(char)
return self
def set_skip_first_data_row(self, skip: bool = True):
"""
Set whether to skip the first row of csv file.
"""
self._j_schema_builder.setSkipFirstDataRow(skip)
return self
def set_strict_headers(self, strict: bool = True):
"""
        Set whether to use strict headers, which checks that the column names in the header are
        consistent with the schema.
"""
self._j_schema_builder.setStrictHeaders(strict)
return self
def set_use_header(self, use: bool = True):
"""
Set whether to read header.
"""
self._j_schema_builder.setUseHeader(use)
return self
def size(self):
return len(self._fields)
def __len__(self):
return self.size()
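# --- Editor's illustrative sketch (not part of the PyFlink API surface) ---
# A small sketch of builder options not covered by the CsvReaderFormat example
# below: comments and header handling. The helper name and the column layout
# are arbitrary.
def _example_csv_schema_with_options():
    from pyflink.table import DataTypes
    return CsvSchema.builder() \
        .add_string_column('name') \
        .add_number_column('age', number_type=DataTypes.INT()) \
        .set_allow_comments() \
        .set_use_header() \
        .set_strict_headers() \
        .build()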
class CsvReaderFormat(StreamFormat):
"""
The :class:`~StreamFormat` for reading csv files.
Example:
::
>>> schema = CsvSchema.builder() \\
... .add_number_column('id', number_type=DataTypes.INT()) \\
... .add_string_column('name') \\
... .add_array_column('list', ',', element_type=DataTypes.STRING()) \\
... .set_column_separator('|') \\
... .set_escape_char('\\\\') \\
... .set_use_header() \\
... .set_strict_headers() \\
... .build()
>>> source = FileSource.for_record_stream_format(
... CsvReaderFormat.for_schema(schema), CSV_FILE_PATH).build()
>>> ds = env.from_source(source, WatermarkStrategy.no_watermarks(), 'csv-source')
>>> # the type of records is Types.ROW_NAMED(['id', 'name', 'list'],
>>> # [Types.INT(), Types.STRING(), Types.LIST(Types.STRING())])
.. versionadded:: 1.16.0
"""
def __init__(self, j_csv_format):
super().__init__(j_csv_format)
@staticmethod
def for_schema(schema: 'CsvSchema') -> 'CsvReaderFormat':
"""
Builds a :class:`CsvReaderFormat` using `CsvSchema`.
"""
from pyflink.table.types import _to_java_data_type
jvm = get_gateway().jvm
j_csv_format = jvm.org.apache.flink.formats.csv.PythonCsvUtils \
.createCsvReaderFormat(
schema._j_schema,
_to_java_data_type(schema._row_type)
)
return CsvReaderFormat(j_csv_format)
class CsvBulkWriters(object):
"""
    CsvBulkWriters is for building a :class:`~pyflink.common.serialization.BulkWriterFactory` that
    writes Rows with a predefined CSV schema to partitioned files in a bulk fashion.
Example:
::
>>> schema = CsvSchema.builder() \\
... .add_number_column('id', number_type=DataTypes.INT()) \\
... .add_string_column('name') \\
... .add_array_column('list', ',', element_type=DataTypes.STRING()) \\
... .set_column_separator('|') \\
... .build()
>>> sink = FileSink.for_bulk_format(
... OUTPUT_DIR, CsvBulkWriters.for_schema(schema)).build()
>>> ds.sink_to(sink)
.. versionadded:: 1.16.0
"""
@staticmethod
def for_schema(schema: 'CsvSchema') -> 'BulkWriterFactory':
"""
Creates a :class:`~pyflink.common.serialization.BulkWriterFactory` for writing records to
files in CSV format.
"""
from pyflink.table.types import _to_java_data_type
jvm = get_gateway().jvm
csv = jvm.org.apache.flink.formats.csv
j_factory = csv.PythonCsvUtils.createCsvBulkWriterFactory(
schema._j_schema,
_to_java_data_type(schema._row_type))
return RowDataBulkWriterFactory(j_factory, schema._row_type)
class CsvRowDeserializationSchema(DeserializationSchema):
"""
    Deserialization schema from CSV to Flink types. Deserializes a byte[] message as a JsonNode
    and converts it to a Row.
    Failures during deserialization are forwarded as wrapped IOExceptions.
"""
def __init__(self, j_deserialization_schema):
super(CsvRowDeserializationSchema, self).__init__(
j_deserialization_schema=j_deserialization_schema)
class Builder(object):
"""
A builder for creating a CsvRowDeserializationSchema.
"""
def __init__(self, type_info: TypeInformation):
if type_info is None:
raise TypeError("Type information must not be None")
self._j_builder = get_gateway().jvm\
.org.apache.flink.formats.csv.CsvRowDeserializationSchema.Builder(
type_info.get_java_type_info())
def set_field_delimiter(self, delimiter: str):
self._j_builder = self._j_builder.setFieldDelimiter(delimiter)
return self
def set_allow_comments(self, allow_comments: bool):
self._j_builder = self._j_builder.setAllowComments(allow_comments)
return self
def set_array_element_delimiter(self, delimiter: str):
self._j_builder = self._j_builder.setArrayElementDelimiter(delimiter)
return self
def set_quote_character(self, c: str):
self._j_builder = self._j_builder.setQuoteCharacter(c)
return self
def set_escape_character(self, c: str):
self._j_builder = self._j_builder.setEscapeCharacter(c)
return self
def set_null_literal(self, null_literal: str):
self._j_builder = self._j_builder.setNullLiteral(null_literal)
return self
def set_ignore_parse_errors(self, ignore_parse_errors: bool):
self._j_builder = self._j_builder.setIgnoreParseErrors(ignore_parse_errors)
return self
def build(self):
j_csv_row_deserialization_schema = self._j_builder.build()
return CsvRowDeserializationSchema(
j_deserialization_schema=j_csv_row_deserialization_schema)
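# --- Editor's illustrative sketch (not part of the PyFlink API surface) ---
# Minimal builder usage: the Row type information is mandatory, everything
# else is optional. The delimiter, field layout and helper name are arbitrary;
# wiring the schema into a source connector is assumed and not shown.
def _example_csv_row_deserialization_schema():
    from pyflink.common import Types
    row_type_info = Types.ROW_NAMED(['id', 'name'], [Types.INT(), Types.STRING()])
    return CsvRowDeserializationSchema.Builder(row_type_info) \
        .set_field_delimiter(';') \
        .set_ignore_parse_errors(True) \
        .build()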
class CsvRowSerializationSchema(SerializationSchema):
"""
    Serialization schema that serializes an object of Flink types into CSV bytes. The input row
    is serialized into an ObjectNode and converted into a byte[].
    The resulting byte[] messages can be deserialized using CsvRowDeserializationSchema.
"""
def __init__(self, j_csv_row_serialization_schema):
super(CsvRowSerializationSchema, self).__init__(j_csv_row_serialization_schema)
class Builder(object):
"""
A builder for creating a CsvRowSerializationSchema.
"""
def __init__(self, type_info: TypeInformation):
if type_info is None:
raise TypeError("Type information must not be None")
self._j_builder = get_gateway().jvm\
.org.apache.flink.formats.csv.CsvRowSerializationSchema.Builder(
type_info.get_java_type_info())
def set_field_delimiter(self, c: str):
self._j_builder = self._j_builder.setFieldDelimiter(c)
return self
def set_line_delimiter(self, delimiter: str):
self._j_builder = self._j_builder.setLineDelimiter(delimiter)
return self
def set_array_element_delimiter(self, delimiter: str):
self._j_builder = self._j_builder.setArrayElementDelimiter(delimiter)
return self
def disable_quote_character(self):
self._j_builder = self._j_builder.disableQuoteCharacter()
return self
def set_quote_character(self, c: str):
self._j_builder = self._j_builder.setQuoteCharacter(c)
return self
def set_escape_character(self, c: str):
self._j_builder = self._j_builder.setEscapeCharacter(c)
return self
def set_null_literal(self, s: str):
self._j_builder = self._j_builder.setNullLiteral(s)
return self
def build(self):
j_serialization_schema = self._j_builder.build()
return CsvRowSerializationSchema(j_csv_row_serialization_schema=j_serialization_schema)
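# --- Editor's illustrative sketch (not part of the PyFlink API surface) ---
# The CSV serialization builder mirrors the deserialization one above; the
# resulting schema would normally be handed to a sink connector, which is
# assumed here. Delimiter, quote character and helper name are arbitrary.
def _example_csv_row_serialization_schema():
    from pyflink.common import Types
    row_type_info = Types.ROW_NAMED(['id', 'name'], [Types.INT(), Types.STRING()])
    return CsvRowSerializationSchema.Builder(row_type_info) \
        .set_field_delimiter(';') \
        .set_quote_character("'") \
        .build()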
| 16,754 | 34.19958 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/datastream/formats/tests/test_csv.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import glob
import os
import tempfile
from typing import Tuple, List
from pyflink.common import WatermarkStrategy, Types
from pyflink.datastream import MapFunction
from pyflink.datastream.connectors.file_system import FileSource, FileSink
from pyflink.datastream.formats.csv import CsvSchema, CsvReaderFormat, CsvBulkWriters, \
CsvRowSerializationSchema, CsvRowDeserializationSchema
from pyflink.datastream.tests.test_util import DataStreamTestSinkFunction
from pyflink.java_gateway import get_gateway
from pyflink.table import DataTypes
from pyflink.testing.test_case_utils import PyFlinkStreamingTestCase, PyFlinkTestCase
from pyflink.util.java_utils import get_j_env_configuration
class FileSourceCsvReaderFormatTests(object):
def setUp(self):
super(FileSourceCsvReaderFormatTests, self).setUp()
self.test_sink = DataStreamTestSinkFunction()
self.csv_file_name = tempfile.mktemp(suffix='.csv', dir=self.tempdir)
def test_csv_primitive_column(self):
schema, lines = _create_csv_primitive_column_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_primitive_column')
_check_csv_primitive_column_results(self, self.test_sink.get_results(True, False))
def test_csv_add_columns_from(self):
original_schema, lines = _create_csv_primitive_column_schema_and_lines()
schema = CsvSchema.builder().add_columns_from(original_schema).build()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_schema_copy')
_check_csv_primitive_column_results(self, self.test_sink.get_results(True, False))
def test_csv_array_column(self):
schema, lines = _create_csv_array_column_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_array_column')
_check_csv_array_column_results(self, self.test_sink.get_results(True, False))
def test_csv_allow_comments(self):
schema, lines = _create_csv_allow_comments_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_allow_comments')
_check_csv_allow_comments_results(self, self.test_sink.get_results(True, False))
def test_csv_use_header(self):
schema, lines = _create_csv_use_header_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_use_header')
_check_csv_use_header_results(self, self.test_sink.get_results(True, False))
def test_csv_strict_headers(self):
schema, lines = _create_csv_strict_headers_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_strict_headers')
_check_csv_strict_headers_results(self, self.test_sink.get_results(True, False))
def test_csv_default_quote_char(self):
schema, lines = _create_csv_default_quote_char_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_default_quote_char')
_check_csv_default_quote_char_results(self, self.test_sink.get_results(True, False))
def test_csv_customize_quote_char(self):
schema, lines = _create_csv_customize_quote_char_schema_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_customize_quote_char')
_check_csv_customize_quote_char_results(self, self.test_sink.get_results(True, False))
def test_csv_use_escape_char(self):
schema, lines = _create_csv_set_escape_char_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_use_escape_char')
_check_csv_set_escape_char_results(self, self.test_sink.get_results(True, False))
def _build_csv_job(self, schema, lines):
with open(self.csv_file_name, 'w') as f:
for line in lines:
f.write(line)
source = FileSource.for_record_stream_format(
CsvReaderFormat.for_schema(schema), self.csv_file_name).build()
ds = self.env.from_source(source, WatermarkStrategy.no_watermarks(), 'csv-source')
ds.map(PassThroughMapFunction(), output_type=Types.PICKLED_BYTE_ARRAY()) \
.add_sink(self.test_sink)
class ProcessFileSourceCsvReaderFormatTests(FileSourceCsvReaderFormatTests,
PyFlinkStreamingTestCase):
pass
class EmbeddedFileSourceCsvReaderFormatTests(FileSourceCsvReaderFormatTests,
PyFlinkStreamingTestCase):
def setUp(self):
super(EmbeddedFileSourceCsvReaderFormatTests, self).setUp()
config = get_j_env_configuration(self.env._j_stream_execution_environment)
config.setString("python.execution-mode", "thread")
class FileSinkCsvBulkWriterTests(PyFlinkStreamingTestCase):
def setUp(self):
super().setUp()
self.env.set_parallelism(1)
self.csv_file_name = tempfile.mktemp(suffix='.csv', dir=self.tempdir)
self.csv_dir_name = tempfile.mkdtemp(dir=self.tempdir)
def test_csv_primitive_column_write(self):
schema, lines = _create_csv_primitive_column_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_primitive_column_write')
results = self._read_csv_file()
self.assertTrue(len(results) == 1)
self.assertEqual(
results[0],
'127,-32767,2147483647,-9223372036854775808,3.0E38,2.0E-308,2,true,string\n'
)
def test_csv_array_column_write(self):
schema, lines = _create_csv_array_column_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_array_column_write')
results = self._read_csv_file()
self.assertTrue(len(results) == 1)
self.assertListEqual(results, lines)
def test_csv_default_quote_char_write(self):
schema, lines = _create_csv_default_quote_char_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_default_quote_char_write')
results = self._read_csv_file()
self.assertTrue(len(results) == 1)
self.assertListEqual(results, lines)
def test_csv_customize_quote_char_write(self):
schema, lines = _create_csv_customize_quote_char_schema_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_customize_quote_char_write')
results = self._read_csv_file()
self.assertTrue(len(results) == 1)
self.assertListEqual(results, lines)
def test_csv_use_escape_char_write(self):
schema, lines = _create_csv_set_escape_char_schema_and_lines()
self._build_csv_job(schema, lines)
self.env.execute('test_csv_use_escape_char_write')
results = self._read_csv_file()
self.assertTrue(len(results) == 1)
self.assertListEqual(results, ['"string,","""string2"""\n'])
def _build_csv_job(self, schema: CsvSchema, lines):
with open(self.csv_file_name, 'w') as f:
for line in lines:
f.write(line)
source = FileSource.for_record_stream_format(
CsvReaderFormat.for_schema(schema), self.csv_file_name
).build()
ds = self.env.from_source(source, WatermarkStrategy.no_watermarks(), 'csv-source')
sink = FileSink.for_bulk_format(
self.csv_dir_name, CsvBulkWriters.for_schema(schema)
).build()
ds.sink_to(sink)
def _read_csv_file(self) -> List[str]:
lines = []
for file in glob.glob(os.path.join(self.csv_dir_name, '**/*')):
with open(file, 'r') as f:
lines.extend(f.readlines())
return lines
class CsvRowSerializationSchemaTests(PyFlinkTestCase):
def test_csv_row_serialization_schema(self):
jvm = get_gateway().jvm
JRow = jvm.org.apache.flink.types.Row
j_row = JRow(3)
j_row.setField(0, "BEGIN")
j_row.setField(2, "END")
def field_assertion(field_info, csv_value, value, field_delimiter):
row_info = Types.ROW([Types.STRING(), field_info, Types.STRING()])
expected_csv = "BEGIN" + field_delimiter + csv_value + field_delimiter + "END\n"
j_row.setField(1, value)
csv_row_serialization_schema = CsvRowSerializationSchema.Builder(row_info)\
.set_escape_character('*').set_quote_character('\'')\
.set_array_element_delimiter(':').set_field_delimiter(';').build()
csv_row_deserialization_schema = CsvRowDeserializationSchema.Builder(row_info)\
.set_escape_character('*').set_quote_character('\'')\
.set_array_element_delimiter(':').set_field_delimiter(';').build()
csv_row_serialization_schema._j_serialization_schema.open(
jvm.org.apache.flink.connector.testutils.formats.DummyInitializationContext())
csv_row_deserialization_schema._j_deserialization_schema.open(
jvm.org.apache.flink.connector.testutils.formats.DummyInitializationContext())
serialized_bytes = csv_row_serialization_schema._j_serialization_schema.serialize(j_row)
self.assertEqual(expected_csv, str(serialized_bytes, encoding='utf-8'))
j_deserialized_row = csv_row_deserialization_schema._j_deserialization_schema\
.deserialize(expected_csv.encode("utf-8"))
self.assertTrue(j_row.equals(j_deserialized_row))
field_assertion(Types.STRING(), "'123''4**'", "123'4*", ";")
field_assertion(Types.STRING(), "'a;b''c'", "a;b'c", ";")
field_assertion(Types.INT(), "12", 12, ";")
test_j_row = JRow(2)
test_j_row.setField(0, "1")
test_j_row.setField(1, "hello")
field_assertion(Types.ROW([Types.STRING(), Types.STRING()]), "'1:hello'", test_j_row, ";")
test_j_row.setField(1, "hello world")
field_assertion(Types.ROW([Types.STRING(), Types.STRING()]), "'1:hello world'", test_j_row,
";")
field_assertion(Types.STRING(), "null", "null", ";")
class PassThroughMapFunction(MapFunction):
def map(self, value):
return value
def _create_csv_primitive_column_schema_and_lines() -> Tuple[CsvSchema, List[str]]:
schema = CsvSchema.builder() \
.add_number_column('tinyint', DataTypes.TINYINT()) \
.add_number_column('smallint', DataTypes.SMALLINT()) \
.add_number_column('int', DataTypes.INT()) \
.add_number_column('bigint', DataTypes.BIGINT()) \
.add_number_column('float', DataTypes.FLOAT()) \
.add_number_column('double', DataTypes.DOUBLE()) \
.add_number_column('decimal', DataTypes.DECIMAL(2, 0)) \
.add_boolean_column('boolean') \
.add_string_column('string') \
.build()
lines = [
'127,'
'-32767,'
'2147483647,'
'-9223372036854775808,'
'3e38,'
'2e-308,'
'1.5,'
'true,'
'string\n',
]
return schema, lines
def _check_csv_primitive_column_results(test, results):
row = results[0]
test.assertEqual(row['tinyint'], 127)
test.assertEqual(row['smallint'], -32767)
test.assertEqual(row['int'], 2147483647)
test.assertEqual(row['bigint'], -9223372036854775808)
test.assertAlmostEqual(row['float'], 3e38, delta=1e31)
test.assertAlmostEqual(row['double'], 2e-308, delta=2e-301)
test.assertAlmostEqual(row['decimal'], 2)
test.assertEqual(row['boolean'], True)
test.assertEqual(row['string'], 'string')
def _create_csv_array_column_schema_and_lines() -> Tuple[CsvSchema, List[str]]:
schema = CsvSchema.builder() \
.add_array_column('number_array', separator=';', element_type=DataTypes.INT()) \
.add_array_column('boolean_array', separator=':', element_type=DataTypes.BOOLEAN()) \
.add_array_column('string_array', separator=',', element_type=DataTypes.STRING()) \
.set_column_separator('|') \
.disable_quote_char() \
.build()
lines = [
'1;2;3|'
'true:false|'
'a,b,c\n',
]
return schema, lines
def _check_csv_array_column_results(test, results):
row = results[0]
test.assertListEqual(list(row['number_array']), [1, 2, 3])
test.assertListEqual(list(row['boolean_array']), [True, False])
test.assertListEqual(list(row['string_array']), ['a', 'b', 'c'])
def _create_csv_allow_comments_schema_and_lines() -> Tuple[CsvSchema, List[str]]:
schema = CsvSchema.builder() \
.add_string_column('string') \
.set_allow_comments() \
.build()
lines = [
'a\n',
'# this is comment\n',
'b\n',
]
return schema, lines
def _check_csv_allow_comments_results(test, results):
test.assertEqual(results[0]['string'], 'a')
test.assertEqual(results[1]['string'], 'b')
def _create_csv_use_header_schema_and_lines() -> Tuple[CsvSchema, List[str]]:
schema = CsvSchema.builder() \
.add_string_column('string') \
.add_number_column('number') \
.set_use_header() \
.build()
lines = [
'h1,h2\n',
'string,123\n',
]
return schema, lines
def _check_csv_use_header_results(test, results):
row = results[0]
test.assertEqual(row['string'], 'string')
test.assertEqual(row['number'], 123)
def _create_csv_strict_headers_schema_and_lines() -> Tuple[CsvSchema, List[str]]:
schema = CsvSchema.builder() \
.add_string_column('string') \
.add_number_column('number') \
.set_use_header() \
.set_strict_headers() \
.build()
lines = [
'string,number\n',
'string,123\n',
]
return schema, lines
def _check_csv_strict_headers_results(test, results):
row = results[0]
test.assertEqual(row['string'], 'string')
test.assertEqual(row['number'], 123)
def _create_csv_default_quote_char_schema_and_lines() -> Tuple[CsvSchema, List[str]]:
schema = CsvSchema.builder() \
.add_string_column('string') \
.add_string_column('string2') \
.set_column_separator('|') \
.build()
lines = [
'"string"|"string2"\n',
]
return schema, lines
def _check_csv_default_quote_char_results(test, results):
row = results[0]
test.assertEqual(row['string'], 'string')
def _create_csv_customize_quote_char_schema_lines() -> Tuple[CsvSchema, List[str]]:
schema = CsvSchema.builder() \
.add_string_column('string') \
.add_string_column('string2') \
.set_column_separator('|') \
.set_quote_char('`') \
.build()
lines = [
'`string`|`string2`\n',
]
return schema, lines
def _check_csv_customize_quote_char_results(test, results):
row = results[0]
test.assertEqual(row['string'], 'string')
def _create_csv_set_escape_char_schema_and_lines() -> Tuple[CsvSchema, List[str]]:
schema = CsvSchema.builder() \
.add_string_column('string') \
.add_string_column('string2') \
.set_column_separator(',') \
.set_escape_char('\\') \
.build()
lines = [
'string\\,,\\"string2\\"\n',
]
return schema, lines
def _check_csv_set_escape_char_results(test, results):
row = results[0]
test.assertEqual(row['string'], 'string,')
test.assertEqual(row['string2'], '"string2"')
| 16,384 | 38.105012 | 100 |
py
|