Dataset columns (string lengths per column):

    target             stringlengths  20 to 113k
    src_fm             stringlengths  11 to 86.3k
    src_fm_fc          stringlengths  21 to 86.4k
    src_fm_fc_co       stringlengths  30 to 86.4k
    src_fm_fc_ms       stringlengths  42 to 86.8k
    src_fm_fc_ms_ff    stringlengths  43 to 86.8k
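Each record below pairs a JUnit test method (`target`) with the focal code it exercises, given at increasing levels of class context (`src_fm` through `src_fm_fc_ms_ff`); the column semantics are inferred from the rows themselves, not stated by the source. As a minimal sketch of how such a corpus could be inspected with the Hugging Face `datasets` library (assumptions: the corpus is published as a Hugging Face dataset with the columns listed above; the repository id used here is a placeholder, not the dataset's actual name):

```python
from datasets import load_dataset

# Placeholder identifier (assumption) -- substitute the real repo id, or load a
# local dump instead, e.g. load_dataset("json", data_files="corpus.jsonl").
dataset = load_dataset("your-org/test-to-focal-method-corpus", split="train")

row = dataset[0]
# `target` holds the JUnit test; the `src_fm*` columns hold the focal method
# with progressively more surrounding context (class declaration, constructors,
# other method signatures, and finally class fields).
print(row["target"][:200])
print(row["src_fm_fc_ms_ff"][:200])
```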
@Test(expected = NullPointerException.class) public void shouldThrowNullPointerOnRegisterIfStateStoreIsNull() { context.register(null, false, null); }
@Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { if (initialized) { throw new IllegalStateException("Can only create state stores during initialization."); } Objects.requireNonNull(store, "store must not be null"); stateManager.register(store, loggingEnabled, stateRestoreCallback); }
AbstractProcessorContext implements InternalProcessorContext { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { if (initialized) { throw new IllegalStateException("Can only create state stores during initialization."); } Objects.requireNonNull(store, "store must not be null"); stateManager.register(store, loggingEnabled, stateRestoreCallback); } }
AbstractProcessorContext implements InternalProcessorContext { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { if (initialized) { throw new IllegalStateException("Can only create state stores during initialization."); } Objects.requireNonNull(store, "store must not be null"); stateManager.register(store, loggingEnabled, stateRestoreCallback); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); }
AbstractProcessorContext implements InternalProcessorContext { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { if (initialized) { throw new IllegalStateException("Can only create state stores during initialization."); } Objects.requireNonNull(store, "store must not be null"); stateManager.register(store, loggingEnabled, stateRestoreCallback); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
AbstractProcessorContext implements InternalProcessorContext { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { if (initialized) { throw new IllegalStateException("Can only create state stores during initialization."); } Objects.requireNonNull(store, "store must not be null"); stateManager.register(store, loggingEnabled, stateRestoreCallback); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
@Test public void shouldReturnTopicFromRecordContext() throws Exception { assertThat(context.topic(), equalTo(recordContext.topic())); }
@Override public String topic() { if (recordContext == null) { throw new IllegalStateException("This should not happen as topic() should only be called while a record is processed"); } final String topic = recordContext.topic(); if (topic.equals(NONEXIST_TOPIC)) { return null; } return topic; }
AbstractProcessorContext implements InternalProcessorContext { @Override public String topic() { if (recordContext == null) { throw new IllegalStateException("This should not happen as topic() should only be called while a record is processed"); } final String topic = recordContext.topic(); if (topic.equals(NONEXIST_TOPIC)) { return null; } return topic; } }
AbstractProcessorContext implements InternalProcessorContext { @Override public String topic() { if (recordContext == null) { throw new IllegalStateException("This should not happen as topic() should only be called while a record is processed"); } final String topic = recordContext.topic(); if (topic.equals(NONEXIST_TOPIC)) { return null; } return topic; } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); }
AbstractProcessorContext implements InternalProcessorContext { @Override public String topic() { if (recordContext == null) { throw new IllegalStateException("This should not happen as topic() should only be called while a record is processed"); } final String topic = recordContext.topic(); if (topic.equals(NONEXIST_TOPIC)) { return null; } return topic; } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
AbstractProcessorContext implements InternalProcessorContext { @Override public String topic() { if (recordContext == null) { throw new IllegalStateException("This should not happen as topic() should only be called while a record is processed"); } final String topic = recordContext.topic(); if (topic.equals(NONEXIST_TOPIC)) { return null; } return topic; } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
@Test public void shouldReturnPartitionFromRecordContext() throws Exception { assertThat(context.partition(), equalTo(recordContext.partition())); }
@Override public int partition() { if (recordContext == null) { throw new IllegalStateException("This should not happen as partition() should only be called while a record is processed"); } return recordContext.partition(); }
AbstractProcessorContext implements InternalProcessorContext { @Override public int partition() { if (recordContext == null) { throw new IllegalStateException("This should not happen as partition() should only be called while a record is processed"); } return recordContext.partition(); } }
AbstractProcessorContext implements InternalProcessorContext { @Override public int partition() { if (recordContext == null) { throw new IllegalStateException("This should not happen as partition() should only be called while a record is processed"); } return recordContext.partition(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); }
AbstractProcessorContext implements InternalProcessorContext { @Override public int partition() { if (recordContext == null) { throw new IllegalStateException("This should not happen as partition() should only be called while a record is processed"); } return recordContext.partition(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
AbstractProcessorContext implements InternalProcessorContext { @Override public int partition() { if (recordContext == null) { throw new IllegalStateException("This should not happen as partition() should only be called while a record is processed"); } return recordContext.partition(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
@Test public void shouldReturnOffsetFromRecordContext() throws Exception { assertThat(context.offset(), equalTo(recordContext.offset())); }
@Override public long offset() { if (recordContext == null) { throw new IllegalStateException("This should not happen as offset() should only be called while a record is processed"); } return recordContext.offset(); }
AbstractProcessorContext implements InternalProcessorContext { @Override public long offset() { if (recordContext == null) { throw new IllegalStateException("This should not happen as offset() should only be called while a record is processed"); } return recordContext.offset(); } }
AbstractProcessorContext implements InternalProcessorContext { @Override public long offset() { if (recordContext == null) { throw new IllegalStateException("This should not happen as offset() should only be called while a record is processed"); } return recordContext.offset(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); }
AbstractProcessorContext implements InternalProcessorContext { @Override public long offset() { if (recordContext == null) { throw new IllegalStateException("This should not happen as offset() should only be called while a record is processed"); } return recordContext.offset(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
AbstractProcessorContext implements InternalProcessorContext { @Override public long offset() { if (recordContext == null) { throw new IllegalStateException("This should not happen as offset() should only be called while a record is processed"); } return recordContext.offset(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
@Test public void shouldReturnTimestampFromRecordContext() throws Exception { assertThat(context.timestamp(), equalTo(recordContext.timestamp())); }
@Override public long timestamp() { if (recordContext == null) { throw new IllegalStateException("This should not happen as timestamp() should only be called while a record is processed"); } return recordContext.timestamp(); }
AbstractProcessorContext implements InternalProcessorContext { @Override public long timestamp() { if (recordContext == null) { throw new IllegalStateException("This should not happen as timestamp() should only be called while a record is processed"); } return recordContext.timestamp(); } }
AbstractProcessorContext implements InternalProcessorContext { @Override public long timestamp() { if (recordContext == null) { throw new IllegalStateException("This should not happen as timestamp() should only be called while a record is processed"); } return recordContext.timestamp(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); }
AbstractProcessorContext implements InternalProcessorContext { @Override public long timestamp() { if (recordContext == null) { throw new IllegalStateException("This should not happen as timestamp() should only be called while a record is processed"); } return recordContext.timestamp(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
AbstractProcessorContext implements InternalProcessorContext { @Override public long timestamp() { if (recordContext == null) { throw new IllegalStateException("This should not happen as timestamp() should only be called while a record is processed"); } return recordContext.timestamp(); } AbstractProcessorContext(final TaskId taskId, final String applicationId, final StreamsConfig config, final StreamsMetrics metrics, final StateManager stateManager, final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }
@Test public void shouldReturnFalseWhenSuppliedNullTopicDescription() { Cluster cluster = createCluster(1); try (MockKafkaAdminClientEnv env = new MockKafkaAdminClientEnv(cluster)) { env.kafkaClient().setNode(cluster.controller()); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); TopicAdmin admin = new TopicAdmin(null, env.adminClient()); boolean created = admin.createTopic(null); assertFalse(created); } }
public boolean createTopic(NewTopic topic) { if (topic == null) return false; Set<String> newTopicNames = createTopics(topic); return newTopicNames.contains(topic.name()); }
TopicAdmin implements AutoCloseable { public boolean createTopic(NewTopic topic) { if (topic == null) return false; Set<String> newTopicNames = createTopics(topic); return newTopicNames.contains(topic.name()); } }
TopicAdmin implements AutoCloseable { public boolean createTopic(NewTopic topic) { if (topic == null) return false; Set<String> newTopicNames = createTopics(topic); return newTopicNames.contains(topic.name()); } TopicAdmin(Map<String, Object> adminConfig); TopicAdmin(Map<String, Object> adminConfig, AdminClient adminClient); }
TopicAdmin implements AutoCloseable { public boolean createTopic(NewTopic topic) { if (topic == null) return false; Set<String> newTopicNames = createTopics(topic); return newTopicNames.contains(topic.name()); } TopicAdmin(Map<String, Object> adminConfig); TopicAdmin(Map<String, Object> adminConfig, AdminClient adminClient); static NewTopicBuilder defineTopic(String topicName); boolean createTopic(NewTopic topic); Set<String> createTopics(NewTopic... topics); @Override void close(); }
TopicAdmin implements AutoCloseable { public boolean createTopic(NewTopic topic) { if (topic == null) return false; Set<String> newTopicNames = createTopics(topic); return newTopicNames.contains(topic.name()); } TopicAdmin(Map<String, Object> adminConfig); TopicAdmin(Map<String, Object> adminConfig, AdminClient adminClient); static NewTopicBuilder defineTopic(String topicName); boolean createTopic(NewTopic topic); Set<String> createTopics(NewTopic... topics); @Override void close(); }
@Test public void testStorePartitions() throws Exception { StreamsConfig config = createConfig(baseDir); StandbyTask task = new StandbyTask(taskId, applicationId, topicPartitions, topology, consumer, changelogReader, config, null, stateDirectory); assertEquals(Utils.mkSet(partition2), new HashSet<>(task.checkpointedOffsets().keySet())); }
Map<TopicPartition, Long> checkpointedOffsets() { return checkpointedOffsets; }
StandbyTask extends AbstractTask { Map<TopicPartition, Long> checkpointedOffsets() { return checkpointedOffsets; } }
StandbyTask extends AbstractTask { Map<TopicPartition, Long> checkpointedOffsets() { return checkpointedOffsets; } StandbyTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory); }
StandbyTask extends AbstractTask { Map<TopicPartition, Long> checkpointedOffsets() { return checkpointedOffsets; } StandbyTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory); @Override void resume(); @Override void commit(); @Override void suspend(); @Override void close(final boolean clean); List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records); }
StandbyTask extends AbstractTask { Map<TopicPartition, Long> checkpointedOffsets() { return checkpointedOffsets; } StandbyTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory); @Override void resume(); @Override void commit(); @Override void suspend(); @Override void close(final boolean clean); List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records); }
@SuppressWarnings("unchecked") @Test public void testUpdate() throws Exception { StreamsConfig config = createConfig(baseDir); StandbyTask task = new StandbyTask(taskId, applicationId, topicPartitions, topology, consumer, changelogReader, config, null, stateDirectory); restoreStateConsumer.assign(new ArrayList<>(task.checkpointedOffsets().keySet())); for (ConsumerRecord<Integer, Integer> record : Arrays.asList( new ConsumerRecord<>(partition2.topic(), partition2.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100), new ConsumerRecord<>(partition2.topic(), partition2.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100), new ConsumerRecord<>(partition2.topic(), partition2.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100))) { restoreStateConsumer.bufferRecord(record); } for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) { TopicPartition partition = entry.getKey(); long offset = entry.getValue(); if (offset >= 0) { restoreStateConsumer.seek(partition, offset); } else { restoreStateConsumer.seekToBeginning(singleton(partition)); } } task.update(partition2, restoreStateConsumer.poll(100).records(partition2)); StandbyContextImpl context = (StandbyContextImpl) task.context(); MockStateStoreSupplier.MockStateStore store1 = (MockStateStoreSupplier.MockStateStore) context.getStateMgr().getStore(storeName1); MockStateStoreSupplier.MockStateStore store2 = (MockStateStoreSupplier.MockStateStore) context.getStateMgr().getStore(storeName2); assertEquals(Collections.emptyList(), store1.keys); assertEquals(Utils.mkList(1, 2, 3), store2.keys); task.closeStateManager(true); File taskDir = stateDirectory.directoryForTask(taskId); OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME)); Map<TopicPartition, Long> offsets = checkpoint.read(); assertEquals(1, offsets.size()); assertEquals(new Long(30L + 1L), offsets.get(partition2)); }
public List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records) { log.debug("{} Updating standby replicas of its state store for partition [{}]", logPrefix, partition); return stateMgr.updateStandbyStates(partition, records); }
StandbyTask extends AbstractTask { public List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records) { log.debug("{} Updating standby replicas of its state store for partition [{}]", logPrefix, partition); return stateMgr.updateStandbyStates(partition, records); } }
StandbyTask extends AbstractTask { public List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records) { log.debug("{} Updating standby replicas of its state store for partition [{}]", logPrefix, partition); return stateMgr.updateStandbyStates(partition, records); } StandbyTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory); }
StandbyTask extends AbstractTask { public List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records) { log.debug("{} Updating standby replicas of its state store for partition [{}]", logPrefix, partition); return stateMgr.updateStandbyStates(partition, records); } StandbyTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory); @Override void resume(); @Override void commit(); @Override void suspend(); @Override void close(final boolean clean); List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records); }
StandbyTask extends AbstractTask { public List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records) { log.debug("{} Updating standby replicas of its state store for partition [{}]", logPrefix, partition); return stateMgr.updateStandbyStates(partition, records); } StandbyTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory); @Override void resume(); @Override void commit(); @Override void suspend(); @Override void close(final boolean clean); List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records); }
@Test public void shouldCreateTaskStateDirectory() throws Exception { final TaskId taskId = new TaskId(0, 0); final File taskDirectory = directory.directoryForTask(taskId); assertTrue(taskDirectory.exists()); assertTrue(taskDirectory.isDirectory()); }
File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
@Test(expected = ProcessorStateException.class) public void shouldThrowProcessorStateException() throws Exception { final TaskId taskId = new TaskId(0, 0); Utils.delete(stateDir); directory.directoryForTask(taskId); }
File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
@Test public void shouldNotLockDeletedDirectory() throws Exception { final TaskId taskId = new TaskId(0, 0); Utils.delete(stateDir); assertFalse(directory.lock(taskId, 0)); }
boolean lock(final TaskId taskId, int retry) throws IOException { final File lockFile; if (locks.containsKey(taskId)) { log.trace("{} Found cached state dir lock for task {}", logPrefix, taskId); return true; } try { lockFile = new File(directoryForTask(taskId), LOCK_FILE_NAME); } catch (ProcessorStateException e) { return false; } final FileChannel channel; try { channel = getOrCreateFileChannel(taskId, lockFile.toPath()); } catch (NoSuchFileException e) { return false; } final FileLock lock = tryLock(retry, channel); if (lock != null) { locks.put(taskId, lock); log.debug("{} Acquired state dir lock for task {}", logPrefix, taskId); } return lock != null; }
StateDirectory { boolean lock(final TaskId taskId, int retry) throws IOException { final File lockFile; if (locks.containsKey(taskId)) { log.trace("{} Found cached state dir lock for task {}", logPrefix, taskId); return true; } try { lockFile = new File(directoryForTask(taskId), LOCK_FILE_NAME); } catch (ProcessorStateException e) { return false; } final FileChannel channel; try { channel = getOrCreateFileChannel(taskId, lockFile.toPath()); } catch (NoSuchFileException e) { return false; } final FileLock lock = tryLock(retry, channel); if (lock != null) { locks.put(taskId, lock); log.debug("{} Acquired state dir lock for task {}", logPrefix, taskId); } return lock != null; } }
StateDirectory { boolean lock(final TaskId taskId, int retry) throws IOException { final File lockFile; if (locks.containsKey(taskId)) { log.trace("{} Found cached state dir lock for task {}", logPrefix, taskId); return true; } try { lockFile = new File(directoryForTask(taskId), LOCK_FILE_NAME); } catch (ProcessorStateException e) { return false; } final FileChannel channel; try { channel = getOrCreateFileChannel(taskId, lockFile.toPath()); } catch (NoSuchFileException e) { return false; } final FileLock lock = tryLock(retry, channel); if (lock != null) { locks.put(taskId, lock); log.debug("{} Acquired state dir lock for task {}", logPrefix, taskId); } return lock != null; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); }
StateDirectory { boolean lock(final TaskId taskId, int retry) throws IOException { final File lockFile; if (locks.containsKey(taskId)) { log.trace("{} Found cached state dir lock for task {}", logPrefix, taskId); return true; } try { lockFile = new File(directoryForTask(taskId), LOCK_FILE_NAME); } catch (ProcessorStateException e) { return false; } final FileChannel channel; try { channel = getOrCreateFileChannel(taskId, lockFile.toPath()); } catch (NoSuchFileException e) { return false; } final FileLock lock = tryLock(retry, channel); if (lock != null) { locks.put(taskId, lock); log.debug("{} Acquired state dir lock for task {}", logPrefix, taskId); } return lock != null; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
StateDirectory { boolean lock(final TaskId taskId, int retry) throws IOException { final File lockFile; if (locks.containsKey(taskId)) { log.trace("{} Found cached state dir lock for task {}", logPrefix, taskId); return true; } try { lockFile = new File(directoryForTask(taskId), LOCK_FILE_NAME); } catch (ProcessorStateException e) { return false; } final FileChannel channel; try { channel = getOrCreateFileChannel(taskId, lockFile.toPath()); } catch (NoSuchFileException e) { return false; } final FileLock lock = tryLock(retry, channel); if (lock != null) { locks.put(taskId, lock); log.debug("{} Acquired state dir lock for task {}", logPrefix, taskId); } return lock != null; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
@Test public void shouldNotRemoveNonTaskDirectoriesAndFiles() throws Exception { final File otherDir = TestUtils.tempDirectory(stateDir.toPath(), "foo"); directory.cleanRemovedTasks(0); assertTrue(otherDir.exists()); }
public void cleanRemovedTasks(final long cleanupDelayMs) { final File[] taskDirs = listTaskDirectories(); if (taskDirs == null || taskDirs.length == 0) { return; } for (File taskDir : taskDirs) { final String dirName = taskDir.getName(); TaskId id = TaskId.parse(dirName); if (!locks.containsKey(id)) { try { if (lock(id, 0)) { if (time.milliseconds() > taskDir.lastModified() + cleanupDelayMs) { log.info("{} Deleting obsolete state directory {} for task {} as cleanup delay of {} ms has passed", logPrefix, dirName, id, cleanupDelayMs); Utils.delete(taskDir); } } } catch (OverlappingFileLockException e) { } catch (IOException e) { log.error("{} Failed to lock the state directory due to an unexpected exception", logPrefix, e); } finally { try { unlock(id); } catch (IOException e) { log.error("{} Failed to release the state directory lock", logPrefix); } } } } }
StateDirectory { public void cleanRemovedTasks(final long cleanupDelayMs) { final File[] taskDirs = listTaskDirectories(); if (taskDirs == null || taskDirs.length == 0) { return; } for (File taskDir : taskDirs) { final String dirName = taskDir.getName(); TaskId id = TaskId.parse(dirName); if (!locks.containsKey(id)) { try { if (lock(id, 0)) { if (time.milliseconds() > taskDir.lastModified() + cleanupDelayMs) { log.info("{} Deleting obsolete state directory {} for task {} as cleanup delay of {} ms has passed", logPrefix, dirName, id, cleanupDelayMs); Utils.delete(taskDir); } } } catch (OverlappingFileLockException e) { } catch (IOException e) { log.error("{} Failed to lock the state directory due to an unexpected exception", logPrefix, e); } finally { try { unlock(id); } catch (IOException e) { log.error("{} Failed to release the state directory lock", logPrefix); } } } } } }
StateDirectory { public void cleanRemovedTasks(final long cleanupDelayMs) { final File[] taskDirs = listTaskDirectories(); if (taskDirs == null || taskDirs.length == 0) { return; } for (File taskDir : taskDirs) { final String dirName = taskDir.getName(); TaskId id = TaskId.parse(dirName); if (!locks.containsKey(id)) { try { if (lock(id, 0)) { if (time.milliseconds() > taskDir.lastModified() + cleanupDelayMs) { log.info("{} Deleting obsolete state directory {} for task {} as cleanup delay of {} ms has passed", logPrefix, dirName, id, cleanupDelayMs); Utils.delete(taskDir); } } } catch (OverlappingFileLockException e) { } catch (IOException e) { log.error("{} Failed to lock the state directory due to an unexpected exception", logPrefix, e); } finally { try { unlock(id); } catch (IOException e) { log.error("{} Failed to release the state directory lock", logPrefix); } } } } } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); }
StateDirectory { public void cleanRemovedTasks(final long cleanupDelayMs) { final File[] taskDirs = listTaskDirectories(); if (taskDirs == null || taskDirs.length == 0) { return; } for (File taskDir : taskDirs) { final String dirName = taskDir.getName(); TaskId id = TaskId.parse(dirName); if (!locks.containsKey(id)) { try { if (lock(id, 0)) { if (time.milliseconds() > taskDir.lastModified() + cleanupDelayMs) { log.info("{} Deleting obsolete state directory {} for task {} as cleanup delay of {} ms has passed", logPrefix, dirName, id, cleanupDelayMs); Utils.delete(taskDir); } } } catch (OverlappingFileLockException e) { } catch (IOException e) { log.error("{} Failed to lock the state directory due to an unexpected exception", logPrefix, e); } finally { try { unlock(id); } catch (IOException e) { log.error("{} Failed to release the state directory lock", logPrefix); } } } } } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
StateDirectory { public void cleanRemovedTasks(final long cleanupDelayMs) { final File[] taskDirs = listTaskDirectories(); if (taskDirs == null || taskDirs.length == 0) { return; } for (File taskDir : taskDirs) { final String dirName = taskDir.getName(); TaskId id = TaskId.parse(dirName); if (!locks.containsKey(id)) { try { if (lock(id, 0)) { if (time.milliseconds() > taskDir.lastModified() + cleanupDelayMs) { log.info("{} Deleting obsolete state directory {} for task {} as cleanup delay of {} ms has passed", logPrefix, dirName, id, cleanupDelayMs); Utils.delete(taskDir); } } } catch (OverlappingFileLockException e) { } catch (IOException e) { log.error("{} Failed to lock the state directory due to an unexpected exception", logPrefix, e); } finally { try { unlock(id); } catch (IOException e) { log.error("{} Failed to release the state directory lock", logPrefix); } } } } } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
@Test public void shouldCreateDirectoriesIfParentDoesntExist() throws Exception { final File tempDir = TestUtils.tempDirectory(); final File stateDir = new File(new File(tempDir, "foo"), "state-dir"); final StateDirectory stateDirectory = new StateDirectory(applicationId, stateDir.getPath(), time); final File taskDir = stateDirectory.directoryForTask(new TaskId(0, 0)); assertTrue(stateDir.exists()); assertTrue(taskDir.exists()); }
File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }
@Test public void testMetrics() throws Exception { final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0); final String defaultGroupName = "stream-metrics"; final String defaultPrefix = "thread." + thread.threadClientId(); final Map<String, String> defaultTags = Collections.singletonMap("client-id", thread.threadClientId()); assertNotNull(metrics.getSensor(defaultPrefix + ".commit-latency")); assertNotNull(metrics.getSensor(defaultPrefix + ".poll-latency")); assertNotNull(metrics.getSensor(defaultPrefix + ".process-latency")); assertNotNull(metrics.getSensor(defaultPrefix + ".punctuate-latency")); assertNotNull(metrics.getSensor(defaultPrefix + ".task-created")); assertNotNull(metrics.getSensor(defaultPrefix + ".task-closed")); assertNotNull(metrics.getSensor(defaultPrefix + ".skipped-records")); assertNotNull(metrics.metrics().get(metrics.metricName("commit-latency-avg", defaultGroupName, "The average commit time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("commit-latency-max", defaultGroupName, "The maximum commit time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("commit-rate", defaultGroupName, "The average per-second number of commit calls", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("poll-latency-avg", defaultGroupName, "The average poll time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("poll-latency-max", defaultGroupName, "The maximum poll time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("poll-rate", defaultGroupName, "The average per-second number of record-poll calls", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("process-latency-avg", defaultGroupName, "The average process time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("process-latency-max", defaultGroupName, "The maximum process time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("process-rate", defaultGroupName, "The average per-second number of process calls", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-latency-avg", defaultGroupName, "The average punctuate time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-latency-max", defaultGroupName, "The maximum punctuate time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-rate", defaultGroupName, "The average per-second number of punctuate calls", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("task-created-rate", defaultGroupName, "The average per-second number of newly created tasks", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("task-closed-rate", defaultGroupName, "The average per-second number of closed tasks", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("skipped-records-rate", defaultGroupName, "The average per-second number of skipped records.", defaultTags))); }
String threadClientId() { return threadClientId; }
StreamThread extends Thread { String threadClientId() { return threadClientId; } }
StreamThread extends Thread { String threadClientId() { return threadClientId; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { String threadClientId() { return threadClientId; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { String threadClientId() { return threadClientId; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
@Test public void testMaybeClean() throws Exception { final File baseDir = Files.createTempDirectory("test").toFile(); try { final long cleanupDelay = 1000L; final Properties props = configProps(false); props.setProperty(StreamsConfig.STATE_CLEANUP_DELAY_MS_CONFIG, Long.toString(cleanupDelay)); props.setProperty(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath()); final StreamsConfig config = new StreamsConfig(props); final File applicationDir = new File(baseDir, applicationId); applicationDir.mkdir(); final File stateDir1 = new File(applicationDir, task1.toString()); final File stateDir2 = new File(applicationDir, task2.toString()); final File stateDir3 = new File(applicationDir, task3.toString()); final File extraDir = new File(applicationDir, applicationId); stateDir1.mkdir(); stateDir2.mkdir(); stateDir3.mkdir(); extraDir.mkdir(); builder.addSource("source1", "topic1"); final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) { @Override public void maybeClean(final long now) { super.maybeClean(now); } @Override protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitionsForTask) { final ProcessorTopology topology = builder.build(id.topicGroupId); return new TestStreamTask( id, applicationId, partitionsForTask, topology, consumer, clientSupplier.getProducer(new HashMap<String, Object>()), restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory); } }; initPartitionGrouper(config, thread, clientSupplier); assertTrue(thread.tasks().isEmpty()); mockTime.sleep(cleanupDelay); assertTrue(stateDir1.exists()); assertTrue(stateDir2.exists()); assertTrue(stateDir3.exists()); assertTrue(extraDir.exists()); List<TopicPartition> revokedPartitions; List<TopicPartition> assignedPartitions; Map<TaskId, StreamTask> prevTasks; final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>(); activeTasks.put(task1, Collections.singleton(t1p1)); activeTasks.put(task2, Collections.singleton(t1p2)); thread.setPartitionAssignor(new MockStreamsPartitionAssignor(activeTasks)); revokedPartitions = Collections.emptyList(); assignedPartitions = Arrays.asList(t1p1, t1p2); prevTasks = new HashMap<>(thread.tasks()); final ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener; rebalanceListener.onPartitionsRevoked(revokedPartitions); rebalanceListener.onPartitionsAssigned(assignedPartitions); assertTrue(prevTasks.isEmpty()); assertEquals(2, thread.tasks().size()); mockTime.sleep(cleanupDelay - 10L); thread.maybeClean(mockTime.milliseconds()); assertTrue(stateDir1.exists()); assertTrue(stateDir2.exists()); assertFalse(stateDir3.exists()); assertTrue(extraDir.exists()); mockTime.sleep(11L); thread.maybeClean(mockTime.milliseconds()); assertTrue(stateDir1.exists()); assertTrue(stateDir2.exists()); assertFalse(stateDir3.exists()); assertTrue(extraDir.exists()); activeTasks.clear(); revokedPartitions = assignedPartitions; assignedPartitions = Collections.emptyList(); prevTasks = new HashMap<>(thread.tasks()); rebalanceListener.onPartitionsRevoked(revokedPartitions); rebalanceListener.onPartitionsAssigned(assignedPartitions); assertEquals(2, prevTasks.size()); for (final StreamTask task : prevTasks.values()) { assertTrue(((TestStreamTask) task).committed); ((TestStreamTask) task).committed = false; } assertTrue(thread.tasks().isEmpty()); mockTime.sleep(cleanupDelay - 10L); thread.maybeClean(mockTime.milliseconds()); assertTrue(stateDir1.exists()); assertTrue(stateDir2.exists()); assertFalse(stateDir3.exists()); assertTrue(extraDir.exists()); mockTime.sleep(11L); thread.maybeClean(mockTime.milliseconds()); assertFalse(stateDir1.exists()); assertFalse(stateDir2.exists()); assertFalse(stateDir3.exists()); assertTrue(extraDir.exists()); } finally { Utils.delete(baseDir); } }
protected void maybeClean(final long now) { if (now > lastCleanMs + cleanTimeMs) { stateDirectory.cleanRemovedTasks(cleanTimeMs); lastCleanMs = now; } }
StreamThread extends Thread { protected void maybeClean(final long now) { if (now > lastCleanMs + cleanTimeMs) { stateDirectory.cleanRemovedTasks(cleanTimeMs); lastCleanMs = now; } } }
StreamThread extends Thread { protected void maybeClean(final long now) { if (now > lastCleanMs + cleanTimeMs) { stateDirectory.cleanRemovedTasks(cleanTimeMs); lastCleanMs = now; } } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { protected void maybeClean(final long now) { if (now > lastCleanMs + cleanTimeMs) { stateDirectory.cleanRemovedTasks(cleanTimeMs); lastCleanMs = now; } } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { protected void maybeClean(final long now) { if (now > lastCleanMs + cleanTimeMs) { stateDirectory.cleanRemovedTasks(cleanTimeMs); lastCleanMs = now; } } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
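The focal method above cleans removed task directories only once the configured delay has fully elapsed, which is exactly the boundary the test probes by advancing the mock clock to just under and just past the delay. A minimal sketch of that time gate, with illustrative names standing in for the real StreamThread fields and for stateDirectory.cleanRemovedTasks(...):

// Minimal sketch of the elapsed-time guard used by maybeClean(); field and
// method names here are illustrative, not the real StreamThread internals.
public class CleanupGate {
    private final long cleanTimeMs;   // analogous to STATE_CLEANUP_DELAY_MS_CONFIG
    private long lastCleanMs;
    private final Runnable cleanup;   // stands in for stateDirectory.cleanRemovedTasks(...)

    public CleanupGate(final long cleanTimeMs, final long startMs, final Runnable cleanup) {
        this.cleanTimeMs = cleanTimeMs;
        this.lastCleanMs = startMs;
        this.cleanup = cleanup;
    }

    // Runs the cleanup only once the configured delay has fully elapsed.
    public void maybeClean(final long now) {
        if (now > lastCleanMs + cleanTimeMs) {
            cleanup.run();
            lastCleanMs = now;
        }
    }

    public static void main(final String[] args) {
        final long delay = 1000L;
        final CleanupGate gate = new CleanupGate(delay, 0L, () -> System.out.println("cleaned"));
        gate.maybeClean(delay - 10L); // just under the delay: nothing happens
        gate.maybeClean(delay + 1L);  // past the delay: "cleaned" is printed
    }
}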
@Test public void testMaybeCommit() throws Exception { final File baseDir = Files.createTempDirectory("test").toFile(); try { final long commitInterval = 1000L; final Properties props = configProps(false); props.setProperty(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath()); props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval)); final StreamsConfig config = new StreamsConfig(props); builder.addSource("source1", "topic1"); final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) { @Override public void maybeCommit(final long now) { super.maybeCommit(now); } @Override protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitionsForTask) { final ProcessorTopology topology = builder.build(id.topicGroupId); return new TestStreamTask( id, applicationId, partitionsForTask, topology, consumer, clientSupplier.getProducer(new HashMap<String, Object>()), restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory); } }; initPartitionGrouper(config, thread, clientSupplier); final ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener; final List<TopicPartition> revokedPartitions; final List<TopicPartition> assignedPartitions; revokedPartitions = Collections.emptyList(); assignedPartitions = Arrays.asList(t1p1, t1p2); rebalanceListener.onPartitionsRevoked(revokedPartitions); rebalanceListener.onPartitionsAssigned(assignedPartitions); assertEquals(2, thread.tasks().size()); mockTime.sleep(commitInterval - 10L); thread.maybeCommit(mockTime.milliseconds()); for (final StreamTask task : thread.tasks().values()) { assertFalse(((TestStreamTask) task).committed); } mockTime.sleep(11L); thread.maybeCommit(mockTime.milliseconds()); for (final StreamTask task : thread.tasks().values()) { assertTrue(((TestStreamTask) task).committed); ((TestStreamTask) task).committed = false; } mockTime.sleep(commitInterval - 10L); thread.maybeCommit(mockTime.milliseconds()); for (final StreamTask task : thread.tasks().values()) { assertFalse(((TestStreamTask) task).committed); } mockTime.sleep(11L); thread.maybeCommit(mockTime.milliseconds()); for (final StreamTask task : thread.tasks().values()) { assertTrue(((TestStreamTask) task).committed); ((TestStreamTask) task).committed = false; } } finally { Utils.delete(baseDir); } }
protected void maybeCommit(final long now) { if (commitTimeMs >= 0 && lastCommitMs + commitTimeMs < now) { log.debug("{} Committing all active tasks {} and standby tasks {} because the commit interval {}ms has elapsed by {}ms", logPrefix, activeTasks.keySet(), standbyTasks.keySet(), commitTimeMs, now - lastCommitMs); commitAll(); lastCommitMs = now; processStandbyRecords = true; } }
StreamThread extends Thread { protected void maybeCommit(final long now) { if (commitTimeMs >= 0 && lastCommitMs + commitTimeMs < now) { log.debug("{} Committing all active tasks {} and standby tasks {} because the commit interval {}ms has elapsed by {}ms", logPrefix, activeTasks.keySet(), standbyTasks.keySet(), commitTimeMs, now - lastCommitMs); commitAll(); lastCommitMs = now; processStandbyRecords = true; } } }
StreamThread extends Thread { protected void maybeCommit(final long now) { if (commitTimeMs >= 0 && lastCommitMs + commitTimeMs < now) { log.debug("{} Committing all active tasks {} and standby tasks {} because the commit interval {}ms has elapsed by {}ms", logPrefix, activeTasks.keySet(), standbyTasks.keySet(), commitTimeMs, now - lastCommitMs); commitAll(); lastCommitMs = now; processStandbyRecords = true; } } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { protected void maybeCommit(final long now) { if (commitTimeMs >= 0 && lastCommitMs + commitTimeMs < now) { log.debug("{} Committing all active tasks {} and standby tasks {} because the commit interval {}ms has elapsed by {}ms", logPrefix, activeTasks.keySet(), standbyTasks.keySet(), commitTimeMs, now - lastCommitMs); commitAll(); lastCommitMs = now; processStandbyRecords = true; } } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { protected void maybeCommit(final long now) { if (commitTimeMs >= 0 && lastCommitMs + commitTimeMs < now) { log.debug("{} Committing all active tasks {} and standby tasks {} because the commit interval {}ms has elapsed by {}ms", logPrefix, activeTasks.keySet(), standbyTasks.keySet(), commitTimeMs, now - lastCommitMs); commitAll(); lastCommitMs = now; processStandbyRecords = true; } } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
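Both interval tests above drive the check by advancing a mock clock instead of sleeping for real, which keeps the "interval - 10 ms" versus "interval + 1 ms" boundaries deterministic. A small sketch of that idea, assuming nothing about the real MockTime class beyond the sleep/milliseconds pair the tests call:

// Illustrative manual clock in the spirit of the mockTime used by these tests:
// time only moves when the test says so, so the thread under test can be handed
// a timestamp that is precisely inside or outside the commit/cleanup interval.
public class ManualClock {
    private long nowMs;

    public ManualClock(final long startMs) {
        this.nowMs = startMs;
    }

    public void sleep(final long ms) {
        nowMs += ms; // advances virtual time only; the calling thread never blocks
    }

    public long milliseconds() {
        return nowMs;
    }

    public static void main(final String[] args) {
        final ManualClock clock = new ManualClock(0L);
        clock.sleep(990L);
        System.out.println(clock.milliseconds()); // 990, still inside a 1000 ms interval
        clock.sleep(11L);
        System.out.println(clock.milliseconds()); // 1001, past the interval
    }
}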
@Test public void bytesToConnect() throws UnsupportedEncodingException { ByteBuffer reference = ByteBuffer.wrap("test-string".getBytes("UTF-8")); String msg = "{ \"schema\": { \"type\": \"bytes\" }, \"payload\": \"dGVzdC1zdHJpbmc=\" }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); ByteBuffer converted = ByteBuffer.wrap((byte[]) schemaAndValue.value()); assertEquals(reference, converted); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
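The bytesToConnect test compares the converted value against the raw bytes of "test-string", and the envelope's payload "dGVzdC1zdHJpbmc=" is simply that byte sequence in base64. A short, self-contained check of that correspondence (this only illustrates the encoding; it does not reuse the converter's own deserializer):

import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Illustrative sketch: the "bytes" payload in the JSON envelope is base64 text,
// so decoding it recovers the byte[] the test wraps in a ByteBuffer for comparison.
public class BytesPayloadExample {
    public static void main(final String[] args) {
        final String payload = "dGVzdC1zdHJpbmc=";
        final byte[] decoded = Base64.getDecoder().decode(payload);
        System.out.println(new String(decoded, StandardCharsets.UTF_8)); // prints "test-string"
    }
}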
@Test public void shouldNotNullPointerWhenStandbyTasksAssignedAndNoStateStoresForTopology() throws Exception { builder.addSource("name", "topic").addSink("out", "output"); final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0); thread.setPartitionAssignor(new StreamPartitionAssignor() { @Override Map<TaskId, Set<TopicPartition>> standbyTasks() { return Collections.singletonMap(new TaskId(0, 0), Utils.mkSet(new TopicPartition("topic", 0))); } }); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList()); }
void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
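This test, and several that follow, inject a fixed assignment by calling setPartitionAssignor with an anonymous subclass that overrides a single package-private hook such as standbyTasks(). A generic sketch of that stubbing pattern, using a hypothetical Assignor type rather than the real StreamPartitionAssignor:

import java.util.Collections;
import java.util.Map;
import java.util.Set;

// Generic sketch of the test-only stubbing pattern used above: an anonymous subclass
// overrides one package-private hook so the code under test sees a fixed assignment.
// The Assignor type here is hypothetical, not the real StreamPartitionAssignor.
public class StubAssignorExample {
    static class Assignor {
        Map<Integer, Set<String>> standbyTasks() {
            return Collections.emptyMap();
        }
    }

    public static void main(final String[] args) {
        final Assignor stub = new Assignor() {
            @Override
            Map<Integer, Set<String>> standbyTasks() {
                return Collections.singletonMap(0, Collections.singleton("topic-0"));
            }
        };
        System.out.println(stub.standbyTasks()); // {0=[topic-0]}
    }
}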
@Test public void shouldInitializeRestoreConsumerWithOffsetsFromStandbyTasks() throws Exception { final KStreamBuilder builder = new KStreamBuilder(); builder.setApplicationId(applicationId); builder.stream("t1").groupByKey().count("count-one"); builder.stream("t2").groupByKey().count("count-two"); final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0); final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer; restoreConsumer.updatePartitions("stream-thread-test-count-one-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog", 0, null, new Node[0], new Node[0]))); restoreConsumer.updatePartitions("stream-thread-test-count-two-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-two-changelog", 0, null, new Node[0], new Node[0]))); final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>(); final TopicPartition t1 = new TopicPartition("t1", 0); standbyTasks.put(new TaskId(0, 0), Utils.mkSet(t1)); thread.setPartitionAssignor(new StreamPartitionAssignor() { @Override Map<TaskId, Set<TopicPartition>> standbyTasks() { return standbyTasks; } }); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList()); assertThat(restoreConsumer.assignment(), equalTo(Utils.mkSet(new TopicPartition("stream-thread-test-count-one-changelog", 0)))); standbyTasks.put(new TaskId(1, 0), Utils.mkSet(new TopicPartition("t2", 0))); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(Collections.<TopicPartition>emptyList()); assertThat(restoreConsumer.assignment(), equalTo(Utils.mkSet(new TopicPartition("stream-thread-test-count-one-changelog", 0), new TopicPartition("stream-thread-test-count-two-changelog", 0)))); }
void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
@Test public void shouldCloseSuspendedTasksThatAreNoLongerAssignedToThisStreamThreadBeforeCreatingNewTasks() throws Exception { final KStreamBuilder builder = new KStreamBuilder(); builder.setApplicationId(applicationId); builder.stream("t1").groupByKey().count("count-one"); builder.stream("t2").groupByKey().count("count-two"); final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0); final MockConsumer<byte[], byte[]> restoreConsumer = clientSupplier.restoreConsumer; restoreConsumer.updatePartitions("stream-thread-test-count-one-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-one-changelog", 0, null, new Node[0], new Node[0]))); restoreConsumer.updatePartitions("stream-thread-test-count-two-changelog", Collections.singletonList(new PartitionInfo("stream-thread-test-count-two-changelog", 0, null, new Node[0], new Node[0]))); final HashMap<TopicPartition, Long> offsets = new HashMap<>(); offsets.put(new TopicPartition("stream-thread-test-count-one-changelog", 0), 0L); offsets.put(new TopicPartition("stream-thread-test-count-two-changelog", 0), 0L); restoreConsumer.updateEndOffsets(offsets); restoreConsumer.updateBeginningOffsets(offsets); final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>(); final TopicPartition t1 = new TopicPartition("t1", 0); standbyTasks.put(new TaskId(0, 0), Utils.mkSet(t1)); final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>(); final TopicPartition t2 = new TopicPartition("t2", 0); activeTasks.put(new TaskId(1, 0), Utils.mkSet(t2)); thread.setPartitionAssignor(new StreamPartitionAssignor() { @Override Map<TaskId, Set<TopicPartition>> standbyTasks() { return standbyTasks; } @Override Map<TaskId, Set<TopicPartition>> activeTasks() { return activeTasks; } }); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(Utils.mkSet(t2)); standbyTasks.clear(); activeTasks.clear(); standbyTasks.put(new TaskId(1, 0), Utils.mkSet(t2)); activeTasks.put(new TaskId(0, 0), Utils.mkSet(t1)); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(Utils.mkSet(t1)); }
void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
@Test public void shouldCloseActiveTasksThatAreAssignedToThisStreamThreadButAssignmentHasChangedBeforeCreatingNewTasks() throws Exception { final KStreamBuilder builder = new KStreamBuilder(); builder.setApplicationId(applicationId); builder.stream(Pattern.compile("t.*")).to("out"); final Map<Collection<TopicPartition>, TestStreamTask> createdTasks = new HashMap<>(); final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) { @Override protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) { final ProcessorTopology topology = builder.build(id.topicGroupId); final TestStreamTask task = new TestStreamTask( id, applicationId, partitions, topology, consumer, clientSupplier.getProducer(new HashMap<String, Object>()), restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory); createdTasks.put(partitions, task); return task; } }; final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>(); final TopicPartition t1 = new TopicPartition("t1", 0); final Set<TopicPartition> task00Partitions = new HashSet<>(); task00Partitions.add(t1); final TaskId taskId = new TaskId(0, 0); activeTasks.put(taskId, task00Partitions); thread.setPartitionAssignor(new StreamPartitionAssignor() { @Override Map<TaskId, Set<TopicPartition>> activeTasks() { return activeTasks; } }); StreamPartitionAssignor.SubscriptionUpdates subscriptionUpdates = new StreamPartitionAssignor.SubscriptionUpdates(); Field updatedTopicsField = subscriptionUpdates.getClass().getDeclaredField("updatedTopicSubscriptions"); updatedTopicsField.setAccessible(true); Set<String> updatedTopics = (Set<String>) updatedTopicsField.get(subscriptionUpdates); updatedTopics.add(t1.topic()); builder.updateSubscriptions(subscriptionUpdates, null); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(task00Partitions); final TestStreamTask firstTask = createdTasks.get(task00Partitions); assertThat(firstTask.id(), is(taskId)); task00Partitions.add(new TopicPartition("t2", 0)); updatedTopics.add("t2"); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(task00Partitions); assertTrue("task should have been closed as assignment has changed", firstTask.closed); assertTrue("tasks state manager should have been closed as assignment has changed", firstTask.closedStateManager); assertThat(createdTasks.get(task00Partitions).id(), is(taskId)); }
void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
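The assignment-change test above reaches into StreamPartitionAssignor.SubscriptionUpdates with Field.setAccessible(true) to mutate its private topic set directly. A generic sketch of that reflection trick under a hypothetical Holder class, since the mechanics are the same regardless of the target type:

import java.lang.reflect.Field;
import java.util.HashSet;
import java.util.Set;

// Generic sketch of the reflection trick the test uses: read a private collection
// field via Field.setAccessible(true) and mutate it in place. Holder is hypothetical;
// the real test targets StreamPartitionAssignor.SubscriptionUpdates.
public class ReflectionFieldExample {
    static class Holder {
        private final Set<String> updatedTopics = new HashSet<>();
    }

    @SuppressWarnings("unchecked")
    public static void main(final String[] args) throws Exception {
        final Holder holder = new Holder();
        final Field field = Holder.class.getDeclaredField("updatedTopics");
        field.setAccessible(true);
        final Set<String> topics = (Set<String>) field.get(holder);
        topics.add("t1"); // the mutation is visible through the original private field
        System.out.println(holder.updatedTopics); // [t1]
    }
}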
@Test public void shouldNotViolateAtLeastOnceWhenExceptionOccursDuringTaskSuspension() throws Exception { final KStreamBuilder builder = new KStreamBuilder(); builder.setApplicationId(applicationId); builder.stream("t1").groupByKey(); final TestStreamTask testStreamTask = new TestStreamTask( new TaskId(0, 0), applicationId, Utils.mkSet(new TopicPartition("t1", 0)), builder.build(0), clientSupplier.consumer, clientSupplier.getProducer(new HashMap<String, Object>()), clientSupplier.restoreConsumer, config, new MockStreamsMetrics(new Metrics()), new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), mockTime)) { @Override public void suspend() { throw new RuntimeException("KABOOM!"); } }; final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) { @Override protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) { return testStreamTask; } }; final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>(); activeTasks.put(testStreamTask.id(), testStreamTask.partitions); thread.setPartitionAssignor(new MockStreamsPartitionAssignor(activeTasks)); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions); try { thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); fail("should have thrown exception"); } catch (final Exception e) { } assertFalse(testStreamTask.committed); }
void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
@Test public void shouldNotViolateAtLeastOnceWhenExceptionOccursDuringFlushStateWhileSuspendingState() throws Exception { final KStreamBuilder builder = new KStreamBuilder(); builder.setApplicationId(applicationId); builder.stream("t1").groupByKey(); final TestStreamTask testStreamTask = new TestStreamTask( new TaskId(0, 0), applicationId, Utils.mkSet(new TopicPartition("t1", 0)), builder.build(0), clientSupplier.consumer, clientSupplier.getProducer(new HashMap<String, Object>()), clientSupplier.restoreConsumer, config, new MockStreamsMetrics(new Metrics()), new StateDirectory(applicationId, config.getString(StreamsConfig.STATE_DIR_CONFIG), mockTime)) { @Override protected void flushState() { throw new RuntimeException("KABOOM!"); } }; final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) { @Override protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitions) { return testStreamTask; } }; final Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>(); activeTasks.put(testStreamTask.id(), testStreamTask.partitions); thread.setPartitionAssignor(new MockStreamsPartitionAssignor(activeTasks)); thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); thread.rebalanceListener.onPartitionsAssigned(testStreamTask.partitions); try { thread.rebalanceListener.onPartitionsRevoked(Collections.<TopicPartition>emptyList()); fail("should have thrown exception"); } catch (final Exception e) { } assertFalse(testStreamTask.committed); }
void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
@Test @SuppressWarnings("unchecked") public void shouldAlwaysUpdateWithLatestTopicsFromStreamPartitionAssignor() throws Exception { final TopologyBuilder topologyBuilder = new TopologyBuilder(); topologyBuilder.addSource("source", Pattern.compile("t.*")); topologyBuilder.addProcessor("processor", new MockProcessorSupplier(), "source"); final StreamThread thread = new StreamThread( topologyBuilder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(topologyBuilder, StreamsMetadataState.UNKNOWN_HOST), 0); final StreamPartitionAssignor partitionAssignor = new StreamPartitionAssignor(); final Map<String, Object> configurationMap = new HashMap<>(); configurationMap.put(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE, thread); configurationMap.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 0); partitionAssignor.configure(configurationMap); thread.setPartitionAssignor(partitionAssignor); final Field nodeToSourceTopicsField = topologyBuilder.getClass().getDeclaredField("nodeToSourceTopics"); nodeToSourceTopicsField.setAccessible(true); final Map<String, List<String>> nodeToSourceTopics = (Map<String, List<String>>) nodeToSourceTopicsField.get(topologyBuilder); final List<TopicPartition> topicPartitions = new ArrayList<>(); final TopicPartition topicPartition1 = new TopicPartition("topic-1", 0); final TopicPartition topicPartition2 = new TopicPartition("topic-2", 0); final TopicPartition topicPartition3 = new TopicPartition("topic-3", 0); final TaskId taskId1 = new TaskId(0, 0); final TaskId taskId2 = new TaskId(0, 0); final TaskId taskId3 = new TaskId(0, 0); List<TaskId> activeTasks = Arrays.asList(taskId1); final Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>(); AssignmentInfo info = new AssignmentInfo(activeTasks, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>()); topicPartitions.addAll(Arrays.asList(topicPartition1)); PartitionAssignor.Assignment assignment = new PartitionAssignor.Assignment(topicPartitions, info.encode()); partitionAssignor.onAssignment(assignment); assertTrue(nodeToSourceTopics.get("source").size() == 1); assertTrue(nodeToSourceTopics.get("source").contains("topic-1")); topicPartitions.clear(); activeTasks = Arrays.asList(taskId1, taskId2); info = new AssignmentInfo(activeTasks, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>()); topicPartitions.addAll(Arrays.asList(topicPartition1, topicPartition2)); assignment = new PartitionAssignor.Assignment(topicPartitions, info.encode()); partitionAssignor.onAssignment(assignment); assertTrue(nodeToSourceTopics.get("source").size() == 2); assertTrue(nodeToSourceTopics.get("source").contains("topic-1")); assertTrue(nodeToSourceTopics.get("source").contains("topic-2")); topicPartitions.clear(); activeTasks = Arrays.asList(taskId1, taskId2, taskId3); info = new AssignmentInfo(activeTasks, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>()); topicPartitions.addAll(Arrays.asList(topicPartition1, topicPartition2, topicPartition3)); assignment = new PartitionAssignor.Assignment(topicPartitions, info.encode()); partitionAssignor.onAssignment(assignment); assertTrue(nodeToSourceTopics.get("source").size() == 3); assertTrue(nodeToSourceTopics.get("source").contains("topic-1")); assertTrue(nodeToSourceTopics.get("source").contains("topic-2")); assertTrue(nodeToSourceTopics.get("source").contains("topic-3")); }
void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); }
StreamThread extends Thread { void setPartitionAssignor(final StreamPartitionAssignor partitionAssignor) { this.partitionAssignor = partitionAssignor; } StreamThread(final TopologyBuilder builder, final StreamsConfig config, final KafkaClientSupplier clientSupplier, final String applicationId, final String clientId, final UUID processId, final Metrics metrics, final Time time, final StreamsMetadataState streamsMetadataState, final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }
@Test(expected = NullPointerException.class) public void testRemoveNullSensor() { String groupName = "doesNotMatter"; Map<String, String> tags = new HashMap<>(); StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags); streamsMetrics.removeSensor(null); }
@Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } }
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } }
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } StreamsMetricsImpl(Metrics metrics, String groupName, Map<String, String> tags); }
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } StreamsMetricsImpl(Metrics metrics, String groupName, Map<String, String> tags); Metrics registry(); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents); @Override Map<MetricName, ? extends Metric> metrics(); @Override void recordLatency(Sensor sensor, long startNs, long endNs); @Override void recordThroughput(Sensor sensor, long value); @Override Sensor addLatencyAndThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); @Override Sensor addThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); void measureLatencyNs(final Time time, final Runnable action, final Sensor sensor); @Override void removeSensor(Sensor sensor); }
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } StreamsMetricsImpl(Metrics metrics, String groupName, Map<String, String> tags); Metrics registry(); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents); @Override Map<MetricName, ? extends Metric> metrics(); @Override void recordLatency(Sensor sensor, long startNs, long endNs); @Override void recordThroughput(Sensor sensor, long value); @Override Sensor addLatencyAndThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); @Override Sensor addThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); void measureLatencyNs(final Time time, final Runnable action, final Sensor sensor); @Override void removeSensor(Sensor sensor); }
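The focal removeSensor method removes not only the sensor passed in but also the parent sensor that was registered alongside it, which is why the tests can add sensors with and without parents and remove them uniformly. A minimal sketch of that parent-tracking cleanup, using a plain map-backed registry rather than the real org.apache.kafka.common.metrics.Metrics:

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

// Minimal sketch of the parent-tracking idea behind removeSensor(): when a child
// sensor is removed, the registry also removes the parent created alongside it.
// The map-backed registry here is illustrative, not the real Metrics class.
public class SensorCleanupExample {
    private final Map<String, String> childToParent = new HashMap<>();
    private final Map<String, Object> registry = new HashMap<>();

    public void addWithParent(final String child, final String parent) {
        registry.put(child, new Object());
        registry.put(parent, new Object());
        childToParent.put(child, parent);
    }

    public void remove(final String child) {
        Objects.requireNonNull(child, "Sensor is null");
        registry.remove(child);
        final String parent = childToParent.remove(child);
        if (parent != null) {
            registry.remove(parent);
        }
    }

    public static void main(final String[] args) {
        final SensorCleanupExample metrics = new SensorCleanupExample();
        metrics.addWithParent("op-latency", "op-latency-parent");
        metrics.remove("op-latency");
        System.out.println(metrics.registry.isEmpty()); // true: child and parent both removed
    }
}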
@Test public void testRemoveSensor() { String groupName = "doesNotMatter"; String sensorName = "sensor1"; String scope = "scope"; String entity = "entity"; String operation = "put"; Map<String, String> tags = new HashMap<>(); StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags); Sensor sensor1 = streamsMetrics.addSensor(sensorName, Sensor.RecordingLevel.DEBUG); streamsMetrics.removeSensor(sensor1); Sensor sensor1a = streamsMetrics.addSensor(sensorName, Sensor.RecordingLevel.DEBUG, sensor1); streamsMetrics.removeSensor(sensor1a); Sensor sensor2 = streamsMetrics.addLatencyAndThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG); streamsMetrics.removeSensor(sensor2); Sensor sensor3 = streamsMetrics.addThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG); streamsMetrics.removeSensor(sensor3); }
@Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } }
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } }
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } StreamsMetricsImpl(Metrics metrics, String groupName, Map<String, String> tags); }
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } StreamsMetricsImpl(Metrics metrics, String groupName, Map<String, String> tags); Metrics registry(); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents); @Override Map<MetricName, ? extends Metric> metrics(); @Override void recordLatency(Sensor sensor, long startNs, long endNs); @Override void recordThroughput(Sensor sensor, long value); @Override Sensor addLatencyAndThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); @Override Sensor addThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); void measureLatencyNs(final Time time, final Runnable action, final Sensor sensor); @Override void removeSensor(Sensor sensor); }
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } StreamsMetricsImpl(Metrics metrics, String groupName, Map<String, String> tags); Metrics registry(); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents); @Override Map<MetricName, ? extends Metric> metrics(); @Override void recordLatency(Sensor sensor, long startNs, long endNs); @Override void recordThroughput(Sensor sensor, long value); @Override Sensor addLatencyAndThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); @Override Sensor addThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); void measureLatencyNs(final Time time, final Runnable action, final Sensor sensor); @Override void removeSensor(Sensor sensor); }
@Test public void shouldLockGlobalStateDirectory() throws Exception { stateManager.initialize(context); assertTrue(new File(stateDirectory.globalStateDir(), ".lock").exists()); }
@Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
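The test above only asserts that a ".lock" file appears in the global state directory once initialize() has run. As a minimal, self-contained sketch of the underlying idea (OS-level file locking via java.nio), assuming nothing about the real StateDirectory API; the class and method names below (GlobalDirLockSketch, tryLockGlobalDir, GLOBAL_LOCK_NAME) are illustrative only:

// Hedged sketch: illustrates the file-lock idea behind the ".lock" assertion above.
// None of these names are the StateDirectory API.
import java.io.File;
import java.io.IOException;
import java.nio.channels.FileChannel;
import java.nio.channels.FileLock;
import java.nio.file.StandardOpenOption;

public class GlobalDirLockSketch {
    private static final String GLOBAL_LOCK_NAME = ".lock";

    // Returns the lock if acquired, or null if another process already holds it.
    static FileLock tryLockGlobalDir(final File globalStateDir) throws IOException {
        final File lockFile = new File(globalStateDir, GLOBAL_LOCK_NAME);
        final FileChannel channel = FileChannel.open(lockFile.toPath(),
                StandardOpenOption.CREATE, StandardOpenOption.WRITE);
        final FileLock lock = channel.tryLock();
        if (lock == null) {
            channel.close();
        }
        return lock;
    }

    public static void main(final String[] args) throws IOException {
        final File dir = new File(System.getProperty("java.io.tmpdir"), "global-state-sketch");
        dir.mkdirs();
        final FileLock lock = tryLockGlobalDir(dir);
        // Opening the channel also creates the ".lock" file, which is what the test checks for.
        System.out.println("locked=" + (lock != null)
                + ", lockFileExists=" + new File(dir, GLOBAL_LOCK_NAME).exists());
        if (lock != null) {
            lock.release();
            lock.channel().close();
        }
    }
}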
@Test(expected = LockException.class) public void shouldThrowLockExceptionIfCantGetLock() throws Exception { final StateDirectory stateDir = new StateDirectory("appId", stateDirPath, time); try { stateDir.lockGlobalState(1); stateManager.initialize(context); } finally { stateDir.unlockGlobalState(); } }
@Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
@Test public void shouldNotDeleteCheckpointFileAfterLoaded() throws Exception { writeCheckpoint(); stateManager.initialize(context); assertTrue(checkpointFile.exists()); }
@Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfFailedToReadCheckpointedOffsets() throws Exception { writeCorruptCheckpoint(); stateManager.initialize(context); }
@Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
@Test public void shouldInitializeStateStores() throws Exception { stateManager.initialize(context); assertTrue(store1.initialized); assertTrue(store2.initialized); }
@Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
@Test public void shouldReturnInitializedStoreNames() throws Exception { final Set<String> storeNames = stateManager.initialize(context); assertEquals(Utils.mkSet(store1.name(), store2.name()), storeNames); }
@Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
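Both tests above check the same contract of initialize(): every global store is initialized and the returned set contains exactly the initialized store names. A minimal sketch of that collect-and-init pattern, with Store as an illustrative stand-in rather than the Streams StateStore interface:

// Hedged sketch of the init-and-collect pattern in initialize() above.
import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class InitStoresSketch {
    interface Store { String name(); void init(); }

    // Initializes every store and returns a read-only set of their names.
    static Set<String> initializeAll(final List<Store> stores) {
        final Set<String> names = new LinkedHashSet<>();
        for (final Store store : stores) {
            names.add(store.name());
            store.init();
        }
        return Collections.unmodifiableSet(names);
    }
}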
@Test public void shouldReleaseLockIfExceptionWhenLoadingCheckpoints() throws Exception { writeCorruptCheckpoint(); try { stateManager.initialize(context); } catch (StreamsException e) { /* expected: initialization fails on the corrupt checkpoint */ } final StateDirectory stateDir = new StateDirectory("appId", stateDirPath, new MockTime()); try { assertTrue(stateDir.lockGlobalState(1)); } finally { stateDir.unlockGlobalState(); } }
@Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
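The test above re-acquires the global state lock after a failed initialize(), which works because initialize() unlocks the directory before rethrowing when the checkpoint file cannot be read. A minimal sketch of that release-on-failure pattern, using a plain ReentrantLock and an illustrative CheckpointReader stand-in rather than the real Streams classes (on success the lock is deliberately kept, mirroring the manager holding the directory lock until close):

// Hedged sketch of the release-on-failure pattern exercised above.
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

public class ReleaseOnFailureSketch {
    interface CheckpointReader {
        Map<String, Long> read() throws IOException;
    }

    static Map<String, Long> initWithLock(final ReentrantLock lock, final CheckpointReader reader) {
        if (!lock.tryLock()) {
            throw new IllegalStateException("Failed to lock the global state directory");
        }
        try {
            return reader.read(); // on success the lock is intentionally retained
        } catch (final IOException fatal) {
            lock.unlock(); // release before surfacing the failure, so others can retry
            throw new RuntimeException("Failed to read checkpoints", fatal);
        }
    }

    public static void main(final String[] args) {
        final ReentrantLock lock = new ReentrantLock();
        try {
            initWithLock(lock, () -> { throw new IOException("corrupt checkpoint"); });
        } catch (final RuntimeException expected) {
            System.out.println("unlocked after failure: " + !lock.isLocked()); // true
        }
    }
}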
@Test public void shouldThrowLockExceptionIfIOExceptionCaughtWhenTryingToLockStateDir() throws Exception { stateManager = new GlobalStateManagerImpl(topology, consumer, new StateDirectory("appId", stateDirPath, time) { @Override public boolean lockGlobalState(final int retry) throws IOException { throw new IOException("KABOOM!"); } }); try { stateManager.initialize(context); fail("Should have thrown LockException"); } catch (final LockException e) { /* expected: IOException from lockGlobalState is rethrown as LockException */ } }
@Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store, final boolean ignored, final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }
@SuppressWarnings("unchecked") @Test public void testMaybePunctuate() throws Exception { task.addRecords(partition1, records( new ConsumerRecord<>(partition1.topic(), partition1.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue), new ConsumerRecord<>(partition1.topic(), partition1.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue), new ConsumerRecord<>(partition1.topic(), partition1.partition(), 40, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue) )); task.addRecords(partition2, records( new ConsumerRecord<>(partition2.topic(), partition2.partition(), 25, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue), new ConsumerRecord<>(partition2.topic(), partition2.partition(), 35, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue), new ConsumerRecord<>(partition2.topic(), partition2.partition(), 45, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue) )); assertTrue(task.maybePunctuate()); assertTrue(task.process()); assertEquals(5, task.numBuffered()); assertEquals(1, source1.numReceived); assertEquals(0, source2.numReceived); assertFalse(task.maybePunctuate()); assertTrue(task.process()); assertEquals(4, task.numBuffered()); assertEquals(1, source1.numReceived); assertEquals(1, source2.numReceived); assertTrue(task.maybePunctuate()); assertTrue(task.process()); assertEquals(3, task.numBuffered()); assertEquals(2, source1.numReceived); assertEquals(1, source2.numReceived); assertFalse(task.maybePunctuate()); assertTrue(task.process()); assertEquals(2, task.numBuffered()); assertEquals(2, source1.numReceived); assertEquals(2, source2.numReceived); assertTrue(task.maybePunctuate()); assertTrue(task.process()); assertEquals(1, task.numBuffered()); assertEquals(3, source1.numReceived); assertEquals(2, source2.numReceived); assertFalse(task.maybePunctuate()); assertTrue(task.process()); assertEquals(0, task.numBuffered()); assertEquals(3, source1.numReceived); assertEquals(3, source2.numReceived); assertFalse(task.process()); assertFalse(task.maybePunctuate()); processor.supplier.checkAndClearPunctuateResult(20L, 30L, 40L); }
boolean maybePunctuate() { final long timestamp = partitionGroup.timestamp(); if (timestamp == TimestampTracker.NOT_KNOWN) { return false; } else { return punctuationQueue.mayPunctuate(timestamp, this); } }
StreamTask extends AbstractTask implements Punctuator { boolean maybePunctuate() { final long timestamp = partitionGroup.timestamp(); if (timestamp == TimestampTracker.NOT_KNOWN) { return false; } else { return punctuationQueue.mayPunctuate(timestamp, this); } } }
StreamTask extends AbstractTask implements Punctuator { boolean maybePunctuate() { final long timestamp = partitionGroup.timestamp(); if (timestamp == TimestampTracker.NOT_KNOWN) { return false; } else { return punctuationQueue.mayPunctuate(timestamp, this); } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { boolean maybePunctuate() { final long timestamp = partitionGroup.timestamp(); if (timestamp == TimestampTracker.NOT_KNOWN) { return false; } else { return punctuationQueue.mayPunctuate(timestamp, this); } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { boolean maybePunctuate() { final long timestamp = partitionGroup.timestamp(); if (timestamp == TimestampTracker.NOT_KNOWN) { return false; } else { return punctuationQueue.mayPunctuate(timestamp, this); } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
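testMaybePunctuate drives stream time forward by interleaving records from the two partitions and expects punctuation to fire exactly when the tracked minimum timestamp reaches a scheduled time. A simplified, hedged sketch of that timestamp-gated scheduling (a repeating punctuation kept in a priority queue of due times; this is not the real PunctuationQueue):

// Hedged sketch of timestamp-gated punctuation: fires only once stream time
// reaches a scheduled due time, then reschedules at a fixed interval.
import java.util.PriorityQueue;
import java.util.function.LongConsumer;

public class PunctuationSketch {
    private static final long NOT_KNOWN = -1L;
    private final PriorityQueue<Long> dueTimes = new PriorityQueue<>();
    private final long interval;

    PunctuationSketch(final long interval, final long firstDueTime) {
        this.interval = interval;
        dueTimes.add(firstDueTime);
    }

    // Returns true if at least one punctuation fired at or before streamTime.
    boolean maybePunctuate(final long streamTime, final LongConsumer punctuator) {
        if (streamTime == NOT_KNOWN) {
            return false;
        }
        boolean punctuated = false;
        while (!dueTimes.isEmpty() && dueTimes.peek() <= streamTime) {
            final long due = dueTimes.poll();
            punctuator.accept(due);
            dueTimes.add(due + interval); // reschedule, like a repeating punctuation
            punctuated = true;
        }
        return punctuated;
    }

    public static void main(final String[] args) {
        final PunctuationSketch sketch = new PunctuationSketch(10L, 20L);
        System.out.println(sketch.maybePunctuate(20L, t -> System.out.println("punctuate@" + t))); // true
        System.out.println(sketch.maybePunctuate(25L, t -> System.out.println("punctuate@" + t))); // false
        System.out.println(sketch.maybePunctuate(30L, t -> System.out.println("punctuate@" + t))); // true
    }
}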
@Test public void shouldFlushRecordCollectorOnFlushState() throws Exception { final AtomicBoolean flushed = new AtomicBoolean(false); final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics()); final StreamTask streamTask = new StreamTask(taskId00, "appId", partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, null, time, producer) { @Override RecordCollector createRecordCollector() { return new NoOpRecordCollector() { @Override public void flush() { flushed.set(true); } }; } }; streamTask.flushState(); assertTrue(flushed.get()); }
@Override protected void flushState() { log.trace("{} Flushing state and producer", logPrefix); super.flushState(); recordCollector.flush(); }
StreamTask extends AbstractTask implements Punctuator { @Override protected void flushState() { log.trace("{} Flushing state and producer", logPrefix); super.flushState(); recordCollector.flush(); } }
StreamTask extends AbstractTask implements Punctuator { @Override protected void flushState() { log.trace("{} Flushing state and producer", logPrefix); super.flushState(); recordCollector.flush(); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override protected void flushState() { log.trace("{} Flushing state and producer", logPrefix); super.flushState(); recordCollector.flush(); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override protected void flushState() { log.trace("{} Flushing state and producer", logPrefix); super.flushState(); recordCollector.flush(); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
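The focal method above flushes the state stores first and only afterwards flushes the record collector, so records emitted while the stores flush are not left buffered in the producer. A minimal, dependency-free sketch of that ordering; Collector and Store are hypothetical stand-ins, not the Kafka Streams classes:

import java.util.ArrayList;
import java.util.List;

public class FlushOrderingSketch {
    static class Collector {
        final List<String> buffered = new ArrayList<>();
        void send(String record) { buffered.add(record); }
        void flush() { System.out.println("producer flushes " + buffered.size() + " record(s)"); buffered.clear(); }
    }

    static class Store {
        private final Collector collector;
        Store(Collector collector) { this.collector = collector; }
        // Flushing a store can forward cached entries downstream, i.e. emit more records.
        void flush() { collector.send("evicted-cache-entry"); }
    }

    public static void main(String[] args) {
        Collector collector = new Collector();
        Store store = new Store(collector);
        store.flush();      // state first: may add records to the collector
        collector.flush();  // then the producer, so those records are not stranded
    }
}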
@SuppressWarnings("unchecked") @Test public void shouldCheckpointOffsetsOnCommit() throws Exception { final String storeName = "test"; final String changelogTopic = ProcessorStateManager.storeChangelogTopic("appId", storeName); final InMemoryKeyValueStore inMemoryStore = new InMemoryKeyValueStore(storeName, null, null) { @Override public void init(final ProcessorContext context, final StateStore root) { context.register(root, true, null); } @Override public boolean persistent() { return true; } }; Map<String, SourceNode> sourceByTopics = new HashMap() { { put(partition1.topic(), source1); put(partition2.topic(), source2); } }; final ProcessorTopology topology = new ProcessorTopology(Collections.<ProcessorNode>emptyList(), sourceByTopics, Collections.<String, SinkNode>emptyMap(), Collections.<StateStore>singletonList(inMemoryStore), Collections.singletonMap(storeName, changelogTopic), Collections.<StateStore>emptyList()); final TopicPartition partition = new TopicPartition(changelogTopic, 0); restoreStateConsumer.updatePartitions(changelogTopic, Collections.singletonList( new PartitionInfo(changelogTopic, 0, null, new Node[0], new Node[0]))); restoreStateConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L)); restoreStateConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L)); final long offset = 543L; final StreamTask streamTask = new StreamTask(taskId00, "appId", partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, null, time, producer) { @Override RecordCollector createRecordCollector() { return new NoOpRecordCollector() { @Override public Map<TopicPartition, Long> offsets() { return Collections.singletonMap(partition, offset); } }; } }; time.sleep(config.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)); streamTask.commit(); final OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(stateDirectory.directoryForTask(taskId00), ProcessorStateManager.CHECKPOINT_FILE_NAME)); assertThat(checkpoint.read(), equalTo(Collections.singletonMap(partition, offset + 1))); }
@Override public void commit() { commitImpl(true); }
StreamTask extends AbstractTask implements Punctuator { @Override public void commit() { commitImpl(true); } }
StreamTask extends AbstractTask implements Punctuator { @Override public void commit() { commitImpl(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override public void commit() { commitImpl(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override public void commit() { commitImpl(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
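The test above asserts that the checkpoint written on commit contains offset + 1, i.e. the position to resume reading from rather than the last processed offset. A self-contained sketch of that convention in plain Java; the method name and map keys are hypothetical and this is not the OffsetCheckpoint API:

import java.util.HashMap;
import java.util.Map;

public class CheckpointConventionSketch {
    // Given the offsets of the last records acknowledged per partition, build the map
    // that would be checkpointed: always "last offset + 1", the next position to read.
    static Map<String, Long> toCheckpointOffsets(Map<String, Long> lastAckedOffsets) {
        Map<String, Long> checkpointable = new HashMap<>();
        for (Map.Entry<String, Long> e : lastAckedOffsets.entrySet()) {
            checkpointable.put(e.getKey(), e.getValue() + 1);
        }
        return checkpointable;
    }

    public static void main(String[] args) {
        Map<String, Long> acked = new HashMap<>();
        acked.put("appId-test-changelog-0", 543L);
        // Prints {appId-test-changelog-0=544}, matching the "offset + 1" assertion above.
        System.out.println(toCheckpointOffsets(acked));
    }
}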
@Test public void testListConnectors() throws Throwable { final Capture<Callback<Collection<String>>> cb = Capture.newInstance(); herder.connectors(EasyMock.capture(cb)); expectAndCallbackResult(cb, Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); PowerMock.replayAll(); Collection<String> connectors = connectorsResource.listConnectors(FORWARD); assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), new HashSet<>(connectors)); PowerMock.verifyAll(); }
@GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
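listConnectors() above hands the request to the herder through a callback and then either completes it locally or forwards it (the forward query parameter) to the worker that can answer. The following is a dependency-free sketch of that complete-or-forward idea only; NotLeaderException, completeOrForward and askLeader are hypothetical stand-ins, and the real completeOrForwardRequest additionally handles timeouts and REST forwarding:

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.function.Supplier;

public class CompleteOrForwardSketch {
    static class NotLeaderException extends RuntimeException { }

    // Wait for the local result; if the local worker cannot answer it signals
    // NotLeaderException, and we fall back to asking the leader instead.
    static <T> T completeOrForward(CompletableFuture<T> local, Supplier<T> askLeader) throws Exception {
        try {
            return local.get();
        } catch (ExecutionException e) {
            if (e.getCause() instanceof NotLeaderException) {
                return askLeader.get();
            }
            throw e;
        }
    }

    public static void main(String[] args) throws Exception {
        CompletableFuture<String> local = new CompletableFuture<>();
        local.completeExceptionally(new NotLeaderException());
        System.out.println(completeOrForward(local, () -> "[connector-from-leader]"));
    }
}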
@SuppressWarnings("unchecked") @Test public void shouldNotCheckpointOffsetsOnCommitIfEosIsEnabled() throws Exception { final Map<String, Object> properties = config.originals(); properties.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE); final StreamsConfig testConfig = new StreamsConfig(properties); final String storeName = "test"; final String changelogTopic = ProcessorStateManager.storeChangelogTopic("appId", storeName); final InMemoryKeyValueStore inMemoryStore = new InMemoryKeyValueStore(storeName, null, null) { @Override public void init(final ProcessorContext context, final StateStore root) { context.register(root, true, null); } @Override public boolean persistent() { return true; } }; Map<String, SourceNode> sourceByTopics = new HashMap() { { put(partition1.topic(), source1); put(partition2.topic(), source2); } }; final ProcessorTopology topology = new ProcessorTopology(Collections.<ProcessorNode>emptyList(), sourceByTopics, Collections.<String, SinkNode>emptyMap(), Collections.<StateStore>singletonList(inMemoryStore), Collections.singletonMap(storeName, changelogTopic), Collections.<StateStore>emptyList()); final TopicPartition partition = new TopicPartition(changelogTopic, 0); restoreStateConsumer.updatePartitions(changelogTopic, Collections.singletonList( new PartitionInfo(changelogTopic, 0, null, new Node[0], new Node[0]))); restoreStateConsumer.updateEndOffsets(Collections.singletonMap(partition, 0L)); restoreStateConsumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L)); final long offset = 543L; final StreamTask streamTask = new StreamTask(taskId00, "appId", partitions, topology, consumer, changelogReader, testConfig, streamsMetrics, stateDirectory, null, time, producer) { @Override RecordCollector createRecordCollector() { return new NoOpRecordCollector() { @Override public Map<TopicPartition, Long> offsets() { return Collections.singletonMap(partition, offset); } }; } }; time.sleep(testConfig.getLong(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG)); streamTask.commit(); final File checkpointFile = new File(stateDirectory.directoryForTask(taskId00), ProcessorStateManager.CHECKPOINT_FILE_NAME); assertFalse(checkpointFile.exists()); }
@Override public void commit() { commitImpl(true); }
StreamTask extends AbstractTask implements Punctuator { @Override public void commit() { commitImpl(true); } }
StreamTask extends AbstractTask implements Punctuator { @Override public void commit() { commitImpl(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override public void commit() { commitImpl(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override public void commit() { commitImpl(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
@Test public void shouldCallPunctuateOnPassedInProcessorNode() throws Exception { task.punctuate(processor, 5); assertThat(processor.punctuatedAt, equalTo(5L)); task.punctuate(processor, 10); assertThat(processor.punctuatedAt, equalTo(10L)); }
@Override public void punctuate(final ProcessorNode node, final long timestamp) { if (processorContext.currentNode() != null) { throw new IllegalStateException(String.format("%s Current node is not null", logPrefix)); } updateProcessorContext(new StampedRecord(DUMMY_RECORD, timestamp), node); log.trace("{} Punctuating processor {} with timestamp {}", logPrefix, node.name(), timestamp); try { node.punctuate(timestamp); } catch (final KafkaException e) { throw new StreamsException(String.format("%s Exception caught while punctuating processor '%s'", logPrefix, node.name()), e); } finally { processorContext.setCurrentNode(null); } }
StreamTask extends AbstractTask implements Punctuator { @Override public void punctuate(final ProcessorNode node, final long timestamp) { if (processorContext.currentNode() != null) { throw new IllegalStateException(String.format("%s Current node is not null", logPrefix)); } updateProcessorContext(new StampedRecord(DUMMY_RECORD, timestamp), node); log.trace("{} Punctuating processor {} with timestamp {}", logPrefix, node.name(), timestamp); try { node.punctuate(timestamp); } catch (final KafkaException e) { throw new StreamsException(String.format("%s Exception caught while punctuating processor '%s'", logPrefix, node.name()), e); } finally { processorContext.setCurrentNode(null); } } }
StreamTask extends AbstractTask implements Punctuator { @Override public void punctuate(final ProcessorNode node, final long timestamp) { if (processorContext.currentNode() != null) { throw new IllegalStateException(String.format("%s Current node is not null", logPrefix)); } updateProcessorContext(new StampedRecord(DUMMY_RECORD, timestamp), node); log.trace("{} Punctuating processor {} with timestamp {}", logPrefix, node.name(), timestamp); try { node.punctuate(timestamp); } catch (final KafkaException e) { throw new StreamsException(String.format("%s Exception caught while punctuating processor '%s'", logPrefix, node.name()), e); } finally { processorContext.setCurrentNode(null); } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override public void punctuate(final ProcessorNode node, final long timestamp) { if (processorContext.currentNode() != null) { throw new IllegalStateException(String.format("%s Current node is not null", logPrefix)); } updateProcessorContext(new StampedRecord(DUMMY_RECORD, timestamp), node); log.trace("{} Punctuating processor {} with timestamp {}", logPrefix, node.name(), timestamp); try { node.punctuate(timestamp); } catch (final KafkaException e) { throw new StreamsException(String.format("%s Exception caught while punctuating processor '%s'", logPrefix, node.name()), e); } finally { processorContext.setCurrentNode(null); } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override public void punctuate(final ProcessorNode node, final long timestamp) { if (processorContext.currentNode() != null) { throw new IllegalStateException(String.format("%s Current node is not null", logPrefix)); } updateProcessorContext(new StampedRecord(DUMMY_RECORD, timestamp), node); log.trace("{} Punctuating processor {} with timestamp {}", logPrefix, node.name(), timestamp); try { node.punctuate(timestamp); } catch (final KafkaException e) { throw new StreamsException(String.format("%s Exception caught while punctuating processor '%s'", logPrefix, node.name()), e); } finally { processorContext.setCurrentNode(null); } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
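punctuate() above refuses to run if a current node is already set, installs the node before invoking the callback, and always clears it in a finally block. A simplified sketch of that guard pattern with a hypothetical Node type, not the ProcessorNode/ProcessorContext classes:

public class CurrentNodeGuardSketch {
    interface Node { void punctuate(long timestamp); String name(); }

    private Node currentNode; // emulates processorContext.currentNode()

    void punctuate(Node node, long timestamp) {
        if (currentNode != null) {
            throw new IllegalStateException("Current node is not null");
        }
        currentNode = node;            // enter the node's "scope"
        try {
            node.punctuate(timestamp); // the callback may throw
        } finally {
            currentNode = null;        // always restore, even on failure
        }
    }

    public static void main(String[] args) {
        CurrentNodeGuardSketch task = new CurrentNodeGuardSketch();
        task.punctuate(new Node() {
            public void punctuate(long ts) { System.out.println("punctuated at " + ts); }
            public String name() { return "processor"; }
        }, 5L);
    }
}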
@Test(expected = IllegalStateException.class) public void shouldThrowIllegalStateExceptionOnScheduleIfCurrentNodeIsNull() throws Exception { task.schedule(1); }
public void schedule(final long interval) { if (processorContext.currentNode() == null) { throw new IllegalStateException(String.format("%s Current node is null", logPrefix)); } punctuationQueue.schedule(new PunctuationSchedule(processorContext.currentNode(), interval)); }
StreamTask extends AbstractTask implements Punctuator { public void schedule(final long interval) { if (processorContext.currentNode() == null) { throw new IllegalStateException(String.format("%s Current node is null", logPrefix)); } punctuationQueue.schedule(new PunctuationSchedule(processorContext.currentNode(), interval)); } }
StreamTask extends AbstractTask implements Punctuator { public void schedule(final long interval) { if (processorContext.currentNode() == null) { throw new IllegalStateException(String.format("%s Current node is null", logPrefix)); } punctuationQueue.schedule(new PunctuationSchedule(processorContext.currentNode(), interval)); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { public void schedule(final long interval) { if (processorContext.currentNode() == null) { throw new IllegalStateException(String.format("%s Current node is null", logPrefix)); } punctuationQueue.schedule(new PunctuationSchedule(processorContext.currentNode(), interval)); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { public void schedule(final long interval) { if (processorContext.currentNode() == null) { throw new IllegalStateException(String.format("%s Current node is null", logPrefix)); } punctuationQueue.schedule(new PunctuationSchedule(processorContext.currentNode(), interval)); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
@SuppressWarnings("unchecked") @Test public void shouldThrowExceptionIfAnyExceptionsRaisedDuringCloseButStillCloseAllProcessorNodesTopology() throws Exception { task.close(true); task = createTaskThatThrowsExceptionOnClose(); try { task.close(true); fail("should have thrown runtime exception"); } catch (final RuntimeException e) { task = null; } assertTrue(processor.closed); assertTrue(source1.closed); assertTrue(source2.closed); }
@Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
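close() above keeps closing even when a step fails: it records the first RuntimeException, still runs the remaining cleanup, and rethrows at the end, which is what the test relies on when it expects all processor nodes to be closed despite the failure. A compact sketch of that first-exception-wins pattern over hypothetical close steps, not the real task resources:

import java.util.Arrays;
import java.util.List;

public class CloseAllSketch {
    // Run every close step; remember only the first failure and rethrow it
    // after all steps had a chance to run.
    static void closeAll(List<Runnable> closeSteps) {
        RuntimeException firstException = null;
        for (Runnable step : closeSteps) {
            try {
                step.run();
            } catch (RuntimeException e) {
                if (firstException == null) {
                    firstException = e;
                }
            }
        }
        if (firstException != null) {
            throw firstException;
        }
    }

    public static void main(String[] args) {
        try {
            closeAll(Arrays.asList(
                () -> System.out.println("suspend"),
                () -> { throw new RuntimeException("state manager failed"); },
                () -> System.out.println("record collector closed anyway")));
        } catch (RuntimeException e) {
            System.out.println("rethrown: " + e.getMessage());
        }
    }
}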
@Test public void shouldCommitTransactionOnSuspendEvenIfTransactionIsEmptyIfEosEnabled() throws Exception { final MockProducer producer = new MockProducer(); task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, eosConfig, streamsMetrics, stateDirectory, null, time, producer); task.suspend(); assertTrue(producer.transactionCommitted()); assertFalse(producer.transactionInFlight()); }
@Override public void suspend() { suspend(true); }
StreamTask extends AbstractTask implements Punctuator { @Override public void suspend() { suspend(true); } }
StreamTask extends AbstractTask implements Punctuator { @Override public void suspend() { suspend(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override public void suspend() { suspend(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override public void suspend() { suspend(true); } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
@Test public void shouldAbortTransactionOnDirtyClosedIfEosEnabled() throws Exception { final MockProducer producer = new MockProducer(); task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, eosConfig, streamsMetrics, stateDirectory, null, time, producer); task.close(false); task = null; assertTrue(producer.transactionAborted()); }
@Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
@Test public void shouldNotAbortTransactionOnDirtyClosedIfEosDisabled() throws Exception { final MockProducer producer = new MockProducer(); task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, null, time, producer); task.close(false); assertFalse(producer.transactionAborted()); }
@Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
@SuppressWarnings("unchecked") @Test public void shouldCloseProducerOnCloseWhenEosEnabled() throws Exception { final MockProducer producer = new MockProducer(); task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, eosConfig, streamsMetrics, stateDirectory, null, time, producer); task.close(true); task = null; assertTrue(producer.closed()); }
@Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id, final String applicationId, final Collection<TopicPartition> partitions, final ProcessorTopology topology, final Consumer<byte[], byte[]> consumer, final ChangelogReader changelogReader, final StreamsConfig config, final StreamsMetrics metrics, final StateDirectory stateDirectory, final ThreadCache cache, final Time time, final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }
@Test public void shouldNotThrowNPEWhenOnChangeNotCalled() throws Exception { new StreamsMetadataState(builder, hostOne).getAllMetadataForStore("store"); }
public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
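In application code this lookup is normally reached through KafkaStreams#allMetadataForStore, which is backed by the method above and inherits its behaviour: an empty collection for an unknown store, and the full metadata set for a global store. A hedged usage sketch; the streams instance and the store name "table-one" are assumptions for illustration:

import java.util.Collection;

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.state.HostInfo;
import org.apache.kafka.streams.state.StreamsMetadata;

// Hedged sketch: list every instance that hosts the (assumed) store "table-one".
public class StoreDiscoverySketch {

    public static void printHostsForStore(final KafkaStreams streams) {
        final Collection<StreamsMetadata> metadata = streams.allMetadataForStore("table-one");
        if (metadata.isEmpty()) {
            System.out.println("store unknown or metadata not initialised yet");
            return;
        }
        for (final StreamsMetadata streamsMetadata : metadata) {
            final HostInfo host = streamsMetadata.hostInfo();
            System.out.println(host.host() + ":" + host.port()
                    + " hosts " + streamsMetadata.stateStoreNames());
        }
    }
}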
@Test public void shouldGetAllStreamInstances() throws Exception { final StreamsMetadata one = new StreamsMetadata(hostOne, Utils.mkSet(globalTable, "table-one", "table-two", "merged-table"), Utils.mkSet(topic1P0, topic2P1, topic4P0)); final StreamsMetadata two = new StreamsMetadata(hostTwo, Utils.mkSet(globalTable, "table-two", "table-one", "merged-table"), Utils.mkSet(topic2P0, topic1P1)); final StreamsMetadata three = new StreamsMetadata(hostThree, Utils.mkSet(globalTable, "table-three"), Collections.singleton(topic3P0)); Collection<StreamsMetadata> actual = discovery.getAllMetadata(); assertEquals(3, actual.size()); assertTrue("expected " + actual + " to contain " + one, actual.contains(one)); assertTrue("expected " + actual + " to contain " + two, actual.contains(two)); assertTrue("expected " + actual + " to contain " + three, actual.contains(three)); }
public synchronized Collection<StreamsMetadata> getAllMetadata() { return allMetadata; }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadata() { return allMetadata; } }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadata() { return allMetadata; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadata() { return allMetadata; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadata() { return allMetadata; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
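Note that getAllMetadata() hands back the internal allMetadata collection rather than a copy, so callers should treat the result as read-only. A hedged sketch of enumerating all instances through KafkaStreams#allMetadata, which this method backs:

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.state.StreamsMetadata;

// Hedged sketch: enumerate every instance of the application; the collection is
// treated as read-only because the metadata state returns its own internal list.
public class AllMetadataSketch {

    public static void printAllInstances(final KafkaStreams streams) {
        for (final StreamsMetadata metadata : streams.allMetadata()) {
            System.out.println(metadata.hostInfo() + " -> partitions="
                    + metadata.topicPartitions() + ", stores=" + metadata.stateStoreNames());
        }
    }
}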
@Test public void testListConnectorsNotLeader() throws Throwable { final Capture<Callback<Collection<String>>> cb = Capture.newInstance(); herder.connectors(EasyMock.capture(cb)); expectAndCallbackNotLeaderException(cb); EasyMock.expect(RestServer.httpRequest(EasyMock.eq("http: EasyMock.isNull(), EasyMock.anyObject(TypeReference.class))) .andReturn(new RestServer.HttpResponse<>(200, new HashMap<String, List<String>>(), Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME))); PowerMock.replayAll(); Collection<String> connectors = connectorsResource.listConnectors(FORWARD); assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), new HashSet<>(connectors)); PowerMock.verifyAll(); }
@GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
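As the test above shows, the resource hands the herder a callback and waits for it; when the callback fails because this worker is not the leader, completeOrForwardRequest re-issues the same request against the leader's REST endpoint. A minimal sketch of the wait-for-callback half only, using a plain CompletableFuture; FakeHerder is an illustrative stand-in, not the Connect Herder interface:

import java.util.Arrays;
import java.util.Collection;
import java.util.concurrent.CompletableFuture;

// Sketch of the "give the herder a callback, block until it completes" shape used by
// listConnectors. The forwarding-to-the-leader branch is omitted here.
public class CallbackSketch {

    interface FakeHerder {
        void connectors(CompletableFuture<Collection<String>> cb);
    }

    static Collection<String> listConnectors(final FakeHerder herder) throws Exception {
        final CompletableFuture<Collection<String>> cb = new CompletableFuture<>();
        herder.connectors(cb);   // the herder completes the future, possibly on another thread
        return cb.get();         // block the REST thread until the result (or failure) arrives
    }

    public static void main(final String[] args) throws Exception {
        final Collection<String> names =
                listConnectors(cb -> cb.complete(Arrays.asList("connector-1", "connector-2")));
        System.out.println(names);
    }
}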
@Test public void shouldGetInstancesForStoreName() throws Exception { final StreamsMetadata one = new StreamsMetadata(hostOne, Utils.mkSet(globalTable, "table-one", "table-two", "merged-table"), Utils.mkSet(topic1P0, topic2P1, topic4P0)); final StreamsMetadata two = new StreamsMetadata(hostTwo, Utils.mkSet(globalTable, "table-two", "table-one", "merged-table"), Utils.mkSet(topic2P0, topic1P1)); final Collection<StreamsMetadata> actual = discovery.getAllMetadataForStore("table-one"); assertEquals(2, actual.size()); assertTrue("expected " + actual + " to contain " + one, actual.contains(one)); assertTrue("expected " + actual + " to contain " + two, actual.contains(two)); }
public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
@Test(expected = NullPointerException.class) public void shouldThrowIfStoreNameIsNullOnGetAllInstancesWithStore() throws Exception { discovery.getAllMetadataForStore(null); }
public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
@Test public void shouldReturnEmptyCollectionOnGetAllInstancesWithStoreWhenStoreDoesntExist() throws Exception { final Collection<StreamsMetadata> actual = discovery.getAllMetadataForStore("not-a-store"); assertTrue(actual.isEmpty()); }
public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
@Test public void shouldReturnNullOnGetWithKeyWhenStoreDoesntExist() throws Exception { final StreamsMetadata actual = discovery.getMetadataWithKey("not-a-store", "key", Serdes.String().serializer()); assertNull(actual); }
public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
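getMetadataWithKey is the routing half of interactive queries: given a store, a key, and the serializer that was used for the keys, it identifies the instance owning the partition that holds the key. A hedged usage sketch through KafkaStreams#metadataForKey, which is backed by the method above; the store name and key are assumptions for illustration:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.state.StreamsMetadata;

// Hedged sketch: find the host that owns key "alice" in the (assumed) store "table-three".
// The serializer must match the one used to write the keys, or the computed partition
// (and therefore the host) will be wrong.
public class KeyLookupSketch {

    public static StreamsMetadata whereIsKey(final KafkaStreams streams) {
        final StreamsMetadata metadata =
                streams.metadataForKey("table-three", "alice", Serdes.String().serializer());
        if (metadata == null) {
            return null;                                  // store is unknown
        }
        if (StreamsMetadata.NOT_AVAILABLE.equals(metadata)) {
            return null;                                  // rebalance in progress, retry later
        }
        return metadata;                                  // metadata.hostInfo() owns the key
    }
}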
@Test(expected = NullPointerException.class) public void shouldThrowWhenKeyIsNull() throws Exception { discovery.getMetadataWithKey("table-three", null, Serdes.String().serializer()); }
public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
@Test(expected = NullPointerException.class) public void shouldThrowWhenSerializerIsNull() throws Exception { discovery.getMetadataWithKey("table-three", "key", (Serializer) null); }
public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
@Test(expected = NullPointerException.class) public void shouldThrowIfStoreNameIsNull() throws Exception { discovery.getMetadataWithKey(null, "key", Serdes.String().serializer()); }
public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
@SuppressWarnings("unchecked") @Test(expected = NullPointerException.class) public void shouldThrowIfStreamPartitionerIsNull() throws Exception { discovery.getMetadataWithKey(null, "key", (StreamPartitioner) null); }
public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
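The serializer-free overload listed in the method signatures above (getMetadataWithKey with a StreamPartitioner) exists for topics written with a custom partitioner: the lookup must use the same partitioning logic as the producer, otherwise the key maps to the wrong host. A hedged sketch via the corresponding KafkaStreams#metadataForKey overload; the partitioner body below is illustrative only:

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.processor.StreamPartitioner;
import org.apache.kafka.streams.state.StreamsMetadata;

// Hedged sketch: locate a key when the source topic was produced with a custom partitioner.
// The partitioner here is an assumption; it has to mirror the one that wrote the data.
public class CustomPartitionerLookupSketch {

    public static StreamsMetadata whereIsKey(final KafkaStreams streams, final String key) {
        final StreamPartitioner<String, Object> partitioner =
                (k, value, numPartitions) -> (k.hashCode() & 0x7fffffff) % numPartitions;
        return streams.metadataForKey("table-three", key, partitioner);
    }
}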
@Test public void shouldHaveGlobalStoreInAllMetadata() throws Exception { final Collection<StreamsMetadata> metadata = discovery.getAllMetadataForStore(globalTable); assertEquals(3, metadata.size()); for (StreamsMetadata streamsMetadata : metadata) { assertTrue(streamsMetadata.stateStoreNames().contains(globalTable)); } }
public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
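A minimal usage sketch of the store-discovery API exercised in the record above, assuming a started KafkaStreams instance and a hypothetical store named "word-counts"; KafkaStreams#allMetadataForStore is the public wrapper that delegates to the getAllMetadataForStore method shown here.

import java.util.Collection;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.state.StreamsMetadata;

// Illustrative sketch only: the store name and the running KafkaStreams instance are assumptions.
public class StoreHostLookup {
    // Prints every application instance that currently hosts the "word-counts" store.
    public static void printStoreHosts(final KafkaStreams streams) {
        final Collection<StreamsMetadata> hosts = streams.allMetadataForStore("word-counts");
        for (final StreamsMetadata metadata : hosts) {
            // hostInfo() carries the host:port each instance advertised via application.server
            System.out.println(metadata.hostInfo().host() + ":" + metadata.hostInfo().port());
        }
    }
}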
@Test public void shouldGetMyMetadataForGlobalStoreWithKey() throws Exception { final StreamsMetadata metadata = discovery.getMetadataWithKey(globalTable, "key", Serdes.String().serializer()); assertEquals(hostOne, metadata.hostInfo()); }
public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
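A hedged sketch of key-based discovery, the public counterpart of the getMetadataWithKey method in this record: KafkaStreams#metadataForKey with the key's serializer. The store name and key are hypothetical.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.state.StreamsMetadata;

// Illustrative sketch only: store name and key are assumptions.
public class KeyHostLookup {
    public static String hostForKey(final KafkaStreams streams, final String key) {
        final StreamsMetadata metadata =
            streams.metadataForKey("word-counts", key, Serdes.String().serializer());
        // null means the store is unknown; NOT_AVAILABLE means a rebalance is still in flight.
        if (metadata == null || StreamsMetadata.NOT_AVAILABLE.equals(metadata)) {
            return null;
        }
        return metadata.host() + ":" + metadata.port();
    }
}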
@Test(expected = ConnectException.class) public void testListConnectorsNotSynced() throws Throwable { final Capture<Callback<Collection<String>>> cb = Capture.newInstance(); herder.connectors(EasyMock.capture(cb)); expectAndCallbackException(cb, new ConnectException("not synced")); PowerMock.replayAll(); connectorsResource.listConnectors(FORWARD); }
@GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
ConnectorsResource { @GET @Path("/") public Collection<String> listConnectors(final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Collection<String>> cb = new FutureCallback<>(); herder.connectors(cb); return completeOrForwardRequest(cb, "/connectors", "GET", null, new TypeReference<Collection<String>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
@Test public void shouldGetMyMetadataForGlobalStoreWithKeyAndPartitioner() throws Exception { final StreamsMetadata metadata = discovery.getMetadataWithKey(globalTable, "key", partitioner); assertEquals(hostOne, metadata.hostInfo()); }
public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); }
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName, final K key, final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }
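A hedged sketch of the partitioner-based overload exercised above: when records were written with a custom partitioning scheme, the same StreamPartitioner can be handed to KafkaStreams#metadataForKey so the lookup lands on the right instance. The fixed-partition rule and store name are assumptions.

import org.apache.kafka.streams.KafkaStreams;
import org.apache.kafka.streams.processor.StreamPartitioner;
import org.apache.kafka.streams.state.StreamsMetadata;

// Illustrative sketch only: the partitioning rule and store name are assumptions.
public class PartitionerBasedLookup {
    // Stand-in for whatever custom scheme the producer used; here every key maps to partition 0.
    private static final StreamPartitioner<String, Object> FIXED_PARTITION =
        new StreamPartitioner<String, Object>() {
            @Override
            public Integer partition(final String key, final Object value, final int numPartitions) {
                return 0;
            }
        };

    public static StreamsMetadata lookup(final KafkaStreams streams, final String key) {
        return streams.metadataForKey("word-counts", key, FIXED_PARTITION);
    }
}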
@Test @SuppressWarnings("unchecked") public void shouldThrowStreamsExceptionOnInputRecordWithInvalidTimestamp() { final Bytes anyKey = new Bytes("any key".getBytes()); final Bytes anyValue = new Bytes("any value".getBytes()); context.setTime(-1); try { sink.process(anyKey, anyValue); fail("Should have thrown StreamsException"); } catch (final StreamsException ignored) { } }
@Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }
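The negative-timestamp guard above throws for any record whose extracted timestamp is below zero. One hedged way to keep such records out of a topology is a custom TimestampExtractor that substitutes wall-clock time; the class below is a sketch under that assumption and would still need to be registered through the Streams timestamp-extractor configuration.

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;

// Illustrative sketch only: falling back to wall-clock time is one policy among several.
public class WallClockOnInvalidTimestamp implements TimestampExtractor {
    @Override
    public long extract(final ConsumerRecord<Object, Object> record, final long previousTimestamp) {
        final long timestamp = record.timestamp();
        // Negative timestamps would otherwise surface as the StreamsException thrown by SinkNode.
        return timestamp < 0 ? System.currentTimeMillis() : timestamp;
    }
}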
@Test @SuppressWarnings("unchecked") public void shouldThrowStreamsExceptionOnKeyValueTypeSerializerMismatch() { final String keyOfDifferentTypeThanSerializer = "key with different type"; final String valueOfDifferentTypeThanSerializer = "value with different type"; context.setTime(0); try { sink.process(keyOfDifferentTypeThanSerializer, valueOfDifferentTypeThanSerializer); fail("Should have thrown StreamsException"); } catch (final StreamsException e) { assertThat(e.getCause(), instanceOf(ClassCastException.class)); } }
@Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }
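The serializer-mismatch test above triggers the ClassCastException branch by sending records whose types disagree with the sink's serializers. A hedged DSL sketch of the fix the error message suggests: pass serdes that match the record types directly at the source and sink instead of relying on the defaults. Topic names are assumptions.

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.KStream;
import org.apache.kafka.streams.kstream.KStreamBuilder;

// Illustrative sketch only: topic names are assumptions.
public class ExplicitSinkSerdes {
    public static void wire(final KStreamBuilder builder) {
        final KStream<String, Long> counts =
            builder.stream(Serdes.String(), Serdes.Long(), "counts-input");
        // Explicit serdes at the sink agree with the <String, Long> records, so SinkNode
        // never hits the ClassCastException wrapped in the StreamsException above.
        counts.to(Serdes.String(), Serdes.Long(), "counts-output");
    }
}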
@Test @SuppressWarnings("unchecked") public void shouldHandleNullKeysWhenThrowingStreamsExceptionOnKeyValueTypeSerializerMismatch() { final String invalidValueToTriggerSerializerMismatch = ""; context.setTime(1); try { sink.process(null, invalidValueToTriggerSerializerMismatch); fail("Should have thrown StreamsException"); } catch (final StreamsException e) { assertThat(e.getCause(), instanceOf(ClassCastException.class)); assertThat(e.getMessage(), containsString("unknown because key is null")); } }
@Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }
@Test @SuppressWarnings("unchecked") public void shouldHandleNullValuesWhenThrowingStreamsExceptionOnKeyValueTypeSerializerMismatch() { final String invalidKeyToTriggerSerializerMismatch = ""; context.setTime(1); try { sink.process(invalidKeyToTriggerSerializerMismatch, null); fail("Should have thrown StreamsException"); } catch (final StreamsException e) { assertThat(e.getCause(), instanceOf(ClassCastException.class)); assertThat(e.getMessage(), containsString("unknown because value is null")); } }
@Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }
@SuppressWarnings("unchecked") @Test public void shouldDriveGlobalStore() throws Exception { final StateStoreSupplier storeSupplier = Stores.create("my-store") .withStringKeys().withStringValues().inMemory().disableLogging().build(); final String global = "global"; final String topic = "topic"; final TopologyBuilder topologyBuilder = this.builder .addGlobalStore(storeSupplier, global, STRING_DESERIALIZER, STRING_DESERIALIZER, topic, "processor", define(new StatefulProcessor("my-store"))); driver = new ProcessorTopologyTestDriver(config, topologyBuilder); final KeyValueStore<String, String> globalStore = (KeyValueStore<String, String>) topologyBuilder.globalStateStores().get("my-store"); driver.process(topic, "key1", "value1", STRING_SERIALIZER, STRING_SERIALIZER); driver.process(topic, "key2", "value2", STRING_SERIALIZER, STRING_SERIALIZER); assertEquals("value1", globalStore.get("key1")); assertEquals("value2", globalStore.get("key2")); }
public List<StateStore> globalStateStores() { return globalStateStores; }
ProcessorTopology { public List<StateStore> globalStateStores() { return globalStateStores; } }
ProcessorTopology { public List<StateStore> globalStateStores() { return globalStateStores; } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); }
ProcessorTopology { public List<StateStore> globalStateStores() { return globalStateStores; } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
ProcessorTopology { public List<StateStore> globalStateStores() { return globalStateStores; } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
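The global-store test above registers "my-store" via addGlobalStore and then reads it back directly through ProcessorTopology#globalStateStores. A hedged sketch of how a downstream processor could read the same global store at runtime; the enrichment logic is an assumption.

import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.ProcessorContext;
import org.apache.kafka.streams.state.KeyValueStore;

// Illustrative sketch only: the enrichment rule is an assumption; "my-store" mirrors the test above.
public class GlobalStoreReader extends AbstractProcessor<String, String> {
    private KeyValueStore<String, String> globalStore;

    @SuppressWarnings("unchecked")
    @Override
    public void init(final ProcessorContext context) {
        super.init(context);
        globalStore = (KeyValueStore<String, String>) context.getStateStore("my-store");
    }

    @Override
    public void process(final String key, final String value) {
        // Join each record against the reference data held in the global store.
        final String reference = globalStore.get(key);
        context().forward(key, value + ":" + reference);
    }
}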
@Test public void shouldCreateStringWithSourceAndTopics() throws Exception { builder.addSource("source", "topic1", "topic2"); final ProcessorTopology topology = builder.build(null); final String result = topology.toString(); assertThat(result, containsString("source:\n\t\ttopics:\t\t[topic1, topic2]\n")); }
@Override public String toString() { return toString(""); }
ProcessorTopology { @Override public String toString() { return toString(""); } }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
@Test public void shouldCreateStringWithMultipleSourcesAndTopics() throws Exception { builder.addSource("source", "topic1", "topic2"); builder.addSource("source2", "t", "t1", "t2"); final ProcessorTopology topology = builder.build(null); final String result = topology.toString(); assertThat(result, containsString("source:\n\t\ttopics:\t\t[topic1, topic2]\n")); assertThat(result, containsString("source2:\n\t\ttopics:\t\t[t, t1, t2]\n")); }
@Override public String toString() { return toString(""); }
ProcessorTopology { @Override public String toString() { return toString(""); } }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
@Test public void shouldCreateStringWithProcessors() throws Exception { builder.addSource("source", "t") .addProcessor("processor", mockProcessorSupplier, "source") .addProcessor("other", mockProcessorSupplier, "source"); final ProcessorTopology topology = builder.build(null); final String result = topology.toString(); assertThat(result, containsString("\t\tchildren:\t[processor, other]")); assertThat(result, containsString("processor:\n")); assertThat(result, containsString("other:\n")); }
@Override public String toString() { return toString(""); }
ProcessorTopology { @Override public String toString() { return toString(""); } }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
@Test public void shouldRecursivelyPrintChildren() throws Exception { builder.addSource("source", "t") .addProcessor("processor", mockProcessorSupplier, "source") .addProcessor("child-one", mockProcessorSupplier, "processor") .addProcessor("child-one-one", mockProcessorSupplier, "child-one") .addProcessor("child-two", mockProcessorSupplier, "processor") .addProcessor("child-two-one", mockProcessorSupplier, "child-two"); final String result = builder.build(null).toString(); assertThat(result, containsString("child-one:\n\t\tchildren:\t[child-one-one]")); assertThat(result, containsString("child-two:\n\t\tchildren:\t[child-two-one]")); }
@Override public String toString() { return toString(""); }
ProcessorTopology { @Override public String toString() { return toString(""); } }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes, final Map<String, SourceNode> sourceByTopics, final Map<String, SinkNode> sinkByTopics, final List<StateStore> stateStores, final Map<String, String> storeToChangelogTopic, final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }
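A standalone sketch of exercising the same toString() output outside a test, assuming access to the internal TopologyBuilder/ProcessorTopology API used above; the no-op processor merely stands in for the mock supplier in the tests:

import org.apache.kafka.streams.processor.AbstractProcessor;
import org.apache.kafka.streams.processor.Processor;
import org.apache.kafka.streams.processor.ProcessorSupplier;
import org.apache.kafka.streams.processor.TopologyBuilder;

public class TopologyToStringSketch {
    public static void main(String[] args) {
        ProcessorSupplier<Object, Object> noOp = new ProcessorSupplier<Object, Object>() {
            @Override
            public Processor<Object, Object> get() {
                return new AbstractProcessor<Object, Object>() {
                    @Override
                    public void process(Object key, Object value) { /* stand-in, does nothing */ }
                };
            }
        };
        TopologyBuilder builder = new TopologyBuilder();
        builder.addSource("source", "topic1", "topic2")
               .addProcessor("parent", noOp, "source")
               .addProcessor("child", noOp, "parent");
        // one indented block per node: sources list their topics, processors list their children
        System.out.println(builder.build(null).toString());
    }
}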
@Test public void testCreateConnector() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME)); final Capture<Callback<Herder.Created<ConnectorInfo>>> cb = Capture.newInstance(); herder.putConnectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(body.config()), EasyMock.eq(false), EasyMock.capture(cb)); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, CONNECTOR_TASK_NAMES))); PowerMock.replayAll(); connectorsResource.createConnector(FORWARD, body); PowerMock.verifyAll(); }
@POST @Path("/") public Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest) throws Throwable { String name = createRequest.name(); if (name.contains("/")) { throw new BadRequestException("connector name should not contain '/'"); } Map<String, String> configs = createRequest.config(); if (!configs.containsKey(ConnectorConfig.NAME_CONFIG)) configs.put(ConnectorConfig.NAME_CONFIG, name); FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.putConnectorConfig(name, configs, false, cb); Herder.Created<ConnectorInfo> info = completeOrForwardRequest(cb, "/connectors", "POST", createRequest, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); return Response.created(URI.create("/connectors/" + name)).entity(info.result()).build(); }
ConnectorsResource { @POST @Path("/") public Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest) throws Throwable { String name = createRequest.name(); if (name.contains("/")) { throw new BadRequestException("connector name should not contain '/'"); } Map<String, String> configs = createRequest.config(); if (!configs.containsKey(ConnectorConfig.NAME_CONFIG)) configs.put(ConnectorConfig.NAME_CONFIG, name); FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.putConnectorConfig(name, configs, false, cb); Herder.Created<ConnectorInfo> info = completeOrForwardRequest(cb, "/connectors", "POST", createRequest, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); return Response.created(URI.create("/connectors/" + name)).entity(info.result()).build(); } }
ConnectorsResource { @POST @Path("/") public Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest) throws Throwable { String name = createRequest.name(); if (name.contains("/")) { throw new BadRequestException("connector name should not contain '/'"); } Map<String, String> configs = createRequest.config(); if (!configs.containsKey(ConnectorConfig.NAME_CONFIG)) configs.put(ConnectorConfig.NAME_CONFIG, name); FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.putConnectorConfig(name, configs, false, cb); Herder.Created<ConnectorInfo> info = completeOrForwardRequest(cb, "/connectors", "POST", createRequest, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); return Response.created(URI.create("/connectors/" + name)).entity(info.result()).build(); } ConnectorsResource(Herder herder); }
ConnectorsResource { @POST @Path("/") public Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest) throws Throwable { String name = createRequest.name(); if (name.contains("/")) { throw new BadRequestException("connector name should not contain '/'"); } Map<String, String> configs = createRequest.config(); if (!configs.containsKey(ConnectorConfig.NAME_CONFIG)) configs.put(ConnectorConfig.NAME_CONFIG, name); FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.putConnectorConfig(name, configs, false, cb); Herder.Created<ConnectorInfo> info = completeOrForwardRequest(cb, "/connectors", "POST", createRequest, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); return Response.created(URI.create("/connectors/" + name)).entity(info.result()).build(); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
ConnectorsResource { @POST @Path("/") public Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest) throws Throwable { String name = createRequest.name(); if (name.contains("/")) { throw new BadRequestException("connector name should not contain '/'"); } Map<String, String> configs = createRequest.config(); if (!configs.containsKey(ConnectorConfig.NAME_CONFIG)) configs.put(ConnectorConfig.NAME_CONFIG, name); FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.putConnectorConfig(name, configs, false, cb); Herder.Created<ConnectorInfo> info = completeOrForwardRequest(cb, "/connectors", "POST", createRequest, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); return Response.created(URI.create("/connectors/" + name)).entity(info.result()).build(); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
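The method above serves POST /connectors on the Connect REST interface; a minimal client-side sketch against it, assuming a worker listening on the default port 8083 and a placeholder connector class. The JSON body mirrors CreateConnectorRequest (a name plus a config map), and the resource copies the name into the config if it is missing:

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class CreateConnectorSketch {
    public static void main(String[] args) throws Exception {
        String body = "{\"name\":\"my-connector\","
                + "\"config\":{\"connector.class\":\"org.example.MySourceConnector\",\"tasks.max\":\"1\"}}";
        HttpRequest request = HttpRequest.newBuilder(URI.create("http://localhost:8083/connectors"))
                .header("Content-Type", "application/json")
                .POST(HttpRequest.BodyPublishers.ofString(body))
                .build();
        HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
        // on success the resource returns 201 Created with the ConnectorInfo as the entity
        System.out.println(response.statusCode() + " " + response.body());
    }
}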
@Test public void shouldHaveCompactionPropSetIfSupplied() throws Exception { final Properties properties = new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.compact), Collections.<String, String>emptyMap()).toProperties(0); assertEquals("compact", properties.getProperty(InternalTopicManager.CLEANUP_POLICY_PROP)); }
public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
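A sketch of driving the internal config class above directly, assuming its classes are accessible from the caller's package; the topic name and retention values are illustrative. With compact+delete policies and a retention set, toProperties() emits both cleanup.policy and retention.ms (base retention plus the additional amount):

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.streams.processor.internals.InternalTopicConfig;

public class InternalTopicConfigSketch {
    public static void main(String[] args) {
        InternalTopicConfig config = new InternalTopicConfig(
                "app-store-changelog",
                Utils.mkSet(InternalTopicConfig.CleanupPolicy.compact, InternalTopicConfig.CleanupPolicy.delete),
                Collections.<String, String>emptyMap());
        config.setRetentionMs(24 * 60 * 60 * 1000L);              // base retention for the windowed store
        Properties props = config.toProperties(60 * 60 * 1000L);  // extra retention added on top
        // expect cleanup.policy=compact,delete (order may vary) and retention.ms=90000000
        System.out.println(props);
    }
}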
@Test public void shouldBeCompactedIfCleanupPolicyCompactOrCompactAndDelete() throws Exception { assertTrue(new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.compact), Collections.<String, String>emptyMap()).isCompacted()); assertTrue(new InternalTopicConfig("name", Utils.mkSet(InternalTopicConfig.CleanupPolicy.compact, InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap()).isCompacted()); }
boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); }
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } }
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); }
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
@Test public void shouldNotBeCompactedWhenCleanupPolicyIsDelete() throws Exception { assertFalse(new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap()).isCompacted()); }
boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); }
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } }
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); }
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
@Test public void shouldUseCleanupPolicyFromConfigIfSupplied() throws Exception { final InternalTopicConfig config = new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.singletonMap("cleanup.policy", "compact")); final Properties properties = config.toProperties(0); assertEquals("compact", properties.getProperty("cleanup.policy")); }
public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
@Test public void shouldHavePropertiesSuppliedByUser() throws Exception { final Map<String, String> configs = new HashMap<>(); configs.put("retention.ms", "1000"); configs.put("retention.bytes", "10000"); final InternalTopicConfig topicConfig = new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), configs); final Properties properties = topicConfig.toProperties(0); assertEquals("1000", properties.getProperty("retention.ms")); assertEquals("10000", properties.getProperty("retention.bytes")); }
public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }
@SuppressWarnings("unchecked") @Test public void testSubscription() throws Exception { builder.addSource("source1", "topic1"); builder.addSource("source2", "topic2"); builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2"); final Set<TaskId> prevTasks = Utils.mkSet( new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1)); final Set<TaskId> cachedTasks = Utils.mkSet( new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1), new TaskId(0, 2), new TaskId(1, 2), new TaskId(2, 2)); String clientId = "client-id"; UUID processId = UUID.randomUUID(); StreamThread thread = new StreamThread(builder, config, new MockClientSupplier(), "test", clientId, processId, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) { @Override public Set<TaskId> prevActiveTasks() { return prevTasks; } @Override public Set<TaskId> cachedTasks() { return cachedTasks; } }; partitionAssignor.configure(config.getConsumerConfigs(thread, "test", clientId)); PartitionAssignor.Subscription subscription = partitionAssignor.subscription(Utils.mkSet("topic1", "topic2")); Collections.sort(subscription.topics()); assertEquals(Utils.mkList("topic1", "topic2"), subscription.topics()); Set<TaskId> standbyTasks = new HashSet<>(cachedTasks); standbyTasks.removeAll(prevTasks); SubscriptionInfo info = new SubscriptionInfo(processId, prevTasks, standbyTasks, null); assertEquals(info.encode(), subscription.userData()); }
@Override public Subscription subscription(Set<String> topics) { final Set<TaskId> previousActiveTasks = streamThread.prevActiveTasks(); Set<TaskId> standbyTasks = streamThread.cachedTasks(); standbyTasks.removeAll(previousActiveTasks); SubscriptionInfo data = new SubscriptionInfo(streamThread.processId, previousActiveTasks, standbyTasks, this.userEndPoint); if (streamThread.builder.sourceTopicPattern() != null && !streamThread.builder.subscriptionUpdates().getUpdates().equals(topics)) { updateSubscribedTopics(topics); } return new Subscription(new ArrayList<>(topics), data.encode()); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public Subscription subscription(Set<String> topics) { final Set<TaskId> previousActiveTasks = streamThread.prevActiveTasks(); Set<TaskId> standbyTasks = streamThread.cachedTasks(); standbyTasks.removeAll(previousActiveTasks); SubscriptionInfo data = new SubscriptionInfo(streamThread.processId, previousActiveTasks, standbyTasks, this.userEndPoint); if (streamThread.builder.sourceTopicPattern() != null && !streamThread.builder.subscriptionUpdates().getUpdates().equals(topics)) { updateSubscribedTopics(topics); } return new Subscription(new ArrayList<>(topics), data.encode()); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public Subscription subscription(Set<String> topics) { final Set<TaskId> previousActiveTasks = streamThread.prevActiveTasks(); Set<TaskId> standbyTasks = streamThread.cachedTasks(); standbyTasks.removeAll(previousActiveTasks); SubscriptionInfo data = new SubscriptionInfo(streamThread.processId, previousActiveTasks, standbyTasks, this.userEndPoint); if (streamThread.builder.sourceTopicPattern() != null && !streamThread.builder.subscriptionUpdates().getUpdates().equals(topics)) { updateSubscribedTopics(topics); } return new Subscription(new ArrayList<>(topics), data.encode()); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public Subscription subscription(Set<String> topics) { final Set<TaskId> previousActiveTasks = streamThread.prevActiveTasks(); Set<TaskId> standbyTasks = streamThread.cachedTasks(); standbyTasks.removeAll(previousActiveTasks); SubscriptionInfo data = new SubscriptionInfo(streamThread.processId, previousActiveTasks, standbyTasks, this.userEndPoint); if (streamThread.builder.sourceTopicPattern() != null && !streamThread.builder.subscriptionUpdates().getUpdates().equals(topics)) { updateSubscribedTopics(topics); } return new Subscription(new ArrayList<>(topics), data.encode()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public Subscription subscription(Set<String> topics) { final Set<TaskId> previousActiveTasks = streamThread.prevActiveTasks(); Set<TaskId> standbyTasks = streamThread.cachedTasks(); standbyTasks.removeAll(previousActiveTasks); SubscriptionInfo data = new SubscriptionInfo(streamThread.processId, previousActiveTasks, standbyTasks, this.userEndPoint); if (streamThread.builder.sourceTopicPattern() != null && !streamThread.builder.subscriptionUpdates().getUpdates().equals(topics)) { updateSubscribedTopics(topics); } return new Subscription(new ArrayList<>(topics), data.encode()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); final static int NOT_AVAILABLE; }
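The userData carried in the Subscription above is just an encoded SubscriptionInfo; a sketch of building one directly, where the process id, task ids, and end point are made-up values and the class is assumed to live in the internal assignment package:

import java.nio.ByteBuffer;
import java.util.UUID;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.streams.processor.TaskId;
import org.apache.kafka.streams.processor.internals.assignment.SubscriptionInfo;

public class SubscriptionInfoSketch {
    public static void main(String[] args) {
        SubscriptionInfo info = new SubscriptionInfo(
                UUID.randomUUID(),                               // process id of this instance
                Utils.mkSet(new TaskId(0, 0), new TaskId(0, 1)), // previously active tasks
                Utils.mkSet(new TaskId(1, 0)),                   // standby (cached but not active) tasks
                "localhost:8080");                               // user end point, may be null
        ByteBuffer userData = info.encode();
        // this buffer travels as Subscription userData and is decoded by the group leader
        System.out.println("encoded " + userData.remaining() + " bytes of subscription metadata");
    }
}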
@Test public void testOnAssignment() throws Exception { TopicPartition t2p3 = new TopicPartition("topic2", 3); TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source1", "topic1"); builder.addSource("source2", "topic2"); builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2"); UUID uuid = UUID.randomUUID(); String client1 = "client1"; StreamThread thread = new StreamThread(builder, config, mockClientSupplier, "test", client1, uuid, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0); partitionAssignor.configure(config.getConsumerConfigs(thread, "test", client1)); List<TaskId> activeTaskList = Utils.mkList(task0, task3); Map<TaskId, Set<TopicPartition>> activeTasks = new HashMap<>(); Map<TaskId, Set<TopicPartition>> standbyTasks = new HashMap<>(); activeTasks.put(task0, Utils.mkSet(t1p0)); activeTasks.put(task3, Utils.mkSet(t2p3)); standbyTasks.put(task1, Utils.mkSet(t1p0)); standbyTasks.put(task2, Utils.mkSet(t2p0)); AssignmentInfo info = new AssignmentInfo(activeTaskList, standbyTasks, new HashMap<HostInfo, Set<TopicPartition>>()); PartitionAssignor.Assignment assignment = new PartitionAssignor.Assignment(Utils.mkList(t1p0, t2p3), info.encode()); partitionAssignor.onAssignment(assignment); assertEquals(activeTasks, partitionAssignor.activeTasks()); assertEquals(standbyTasks, partitionAssignor.standbyTasks()); }
@Override public void onAssignment(Assignment assignment) { List<TopicPartition> partitions = new ArrayList<>(assignment.partitions()); Collections.sort(partitions, PARTITION_COMPARATOR); AssignmentInfo info = AssignmentInfo.decode(assignment.userData()); this.standbyTasks = info.standbyTasks; this.activeTasks = new HashMap<>(); if (partitions.size() != info.activeTasks.size()) { throw new TaskAssignmentException( String.format("stream-thread [%s] Number of assigned partitions %d is not equal to the number of active taskIds %d" + ", assignmentInfo=%s", streamThread.getName(), partitions.size(), info.activeTasks.size(), info.toString()) ); } for (int i = 0; i < partitions.size(); i++) { TopicPartition partition = partitions.get(i); TaskId id = info.activeTasks.get(i); Set<TopicPartition> assignedPartitions = activeTasks.get(id); if (assignedPartitions == null) { assignedPartitions = new HashSet<>(); activeTasks.put(id, assignedPartitions); } assignedPartitions.add(partition); } this.partitionsByHostState = info.partitionsByHost; final Collection<Set<TopicPartition>> values = partitionsByHostState.values(); final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>(); for (Set<TopicPartition> value : values) { for (TopicPartition topicPartition : value) { topicToPartitionInfo.put(topicPartition, new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0])); } } metadataWithInternalTopics = Cluster.empty().withPartitions(topicToPartitionInfo); checkForNewTopicAssignments(assignment); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void onAssignment(Assignment assignment) { List<TopicPartition> partitions = new ArrayList<>(assignment.partitions()); Collections.sort(partitions, PARTITION_COMPARATOR); AssignmentInfo info = AssignmentInfo.decode(assignment.userData()); this.standbyTasks = info.standbyTasks; this.activeTasks = new HashMap<>(); if (partitions.size() != info.activeTasks.size()) { throw new TaskAssignmentException( String.format("stream-thread [%s] Number of assigned partitions %d is not equal to the number of active taskIds %d" + ", assignmentInfo=%s", streamThread.getName(), partitions.size(), info.activeTasks.size(), info.toString()) ); } for (int i = 0; i < partitions.size(); i++) { TopicPartition partition = partitions.get(i); TaskId id = info.activeTasks.get(i); Set<TopicPartition> assignedPartitions = activeTasks.get(id); if (assignedPartitions == null) { assignedPartitions = new HashSet<>(); activeTasks.put(id, assignedPartitions); } assignedPartitions.add(partition); } this.partitionsByHostState = info.partitionsByHost; final Collection<Set<TopicPartition>> values = partitionsByHostState.values(); final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>(); for (Set<TopicPartition> value : values) { for (TopicPartition topicPartition : value) { topicToPartitionInfo.put(topicPartition, new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0])); } } metadataWithInternalTopics = Cluster.empty().withPartitions(topicToPartitionInfo); checkForNewTopicAssignments(assignment); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void onAssignment(Assignment assignment) { List<TopicPartition> partitions = new ArrayList<>(assignment.partitions()); Collections.sort(partitions, PARTITION_COMPARATOR); AssignmentInfo info = AssignmentInfo.decode(assignment.userData()); this.standbyTasks = info.standbyTasks; this.activeTasks = new HashMap<>(); if (partitions.size() != info.activeTasks.size()) { throw new TaskAssignmentException( String.format("stream-thread [%s] Number of assigned partitions %d is not equal to the number of active taskIds %d" + ", assignmentInfo=%s", streamThread.getName(), partitions.size(), info.activeTasks.size(), info.toString()) ); } for (int i = 0; i < partitions.size(); i++) { TopicPartition partition = partitions.get(i); TaskId id = info.activeTasks.get(i); Set<TopicPartition> assignedPartitions = activeTasks.get(id); if (assignedPartitions == null) { assignedPartitions = new HashSet<>(); activeTasks.put(id, assignedPartitions); } assignedPartitions.add(partition); } this.partitionsByHostState = info.partitionsByHost; final Collection<Set<TopicPartition>> values = partitionsByHostState.values(); final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>(); for (Set<TopicPartition> value : values) { for (TopicPartition topicPartition : value) { topicToPartitionInfo.put(topicPartition, new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0])); } } metadataWithInternalTopics = Cluster.empty().withPartitions(topicToPartitionInfo); checkForNewTopicAssignments(assignment); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void onAssignment(Assignment assignment) { List<TopicPartition> partitions = new ArrayList<>(assignment.partitions()); Collections.sort(partitions, PARTITION_COMPARATOR); AssignmentInfo info = AssignmentInfo.decode(assignment.userData()); this.standbyTasks = info.standbyTasks; this.activeTasks = new HashMap<>(); if (partitions.size() != info.activeTasks.size()) { throw new TaskAssignmentException( String.format("stream-thread [%s] Number of assigned partitions %d is not equal to the number of active taskIds %d" + ", assignmentInfo=%s", streamThread.getName(), partitions.size(), info.activeTasks.size(), info.toString()) ); } for (int i = 0; i < partitions.size(); i++) { TopicPartition partition = partitions.get(i); TaskId id = info.activeTasks.get(i); Set<TopicPartition> assignedPartitions = activeTasks.get(id); if (assignedPartitions == null) { assignedPartitions = new HashSet<>(); activeTasks.put(id, assignedPartitions); } assignedPartitions.add(partition); } this.partitionsByHostState = info.partitionsByHost; final Collection<Set<TopicPartition>> values = partitionsByHostState.values(); final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>(); for (Set<TopicPartition> value : values) { for (TopicPartition topicPartition : value) { topicToPartitionInfo.put(topicPartition, new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0])); } } metadataWithInternalTopics = Cluster.empty().withPartitions(topicToPartitionInfo); checkForNewTopicAssignments(assignment); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void onAssignment(Assignment assignment) { List<TopicPartition> partitions = new ArrayList<>(assignment.partitions()); Collections.sort(partitions, PARTITION_COMPARATOR); AssignmentInfo info = AssignmentInfo.decode(assignment.userData()); this.standbyTasks = info.standbyTasks; this.activeTasks = new HashMap<>(); if (partitions.size() != info.activeTasks.size()) { throw new TaskAssignmentException( String.format("stream-thread [%s] Number of assigned partitions %d is not equal to the number of active taskIds %d" + ", assignmentInfo=%s", streamThread.getName(), partitions.size(), info.activeTasks.size(), info.toString()) ); } for (int i = 0; i < partitions.size(); i++) { TopicPartition partition = partitions.get(i); TaskId id = info.activeTasks.get(i); Set<TopicPartition> assignedPartitions = activeTasks.get(id); if (assignedPartitions == null) { assignedPartitions = new HashSet<>(); activeTasks.put(id, assignedPartitions); } assignedPartitions.add(partition); } this.partitionsByHostState = info.partitionsByHost; final Collection<Set<TopicPartition>> values = partitionsByHostState.values(); final Map<TopicPartition, PartitionInfo> topicToPartitionInfo = new HashMap<>(); for (Set<TopicPartition> value : values) { for (TopicPartition topicPartition : value) { topicToPartitionInfo.put(topicPartition, new PartitionInfo(topicPartition.topic(), topicPartition.partition(), null, new Node[0], new Node[0])); } } metadataWithInternalTopics = Cluster.empty().withPartitions(topicToPartitionInfo); checkForNewTopicAssignments(assignment); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); final static int NOT_AVAILABLE; }
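onAssignment() above rebuilds its task maps from AssignmentInfo.decode(); a sketch of the encode/decode round trip using the same constructor shape as the test, with made-up topic and task ids and the class assumed to sit in the internal assignment package:

import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.utils.Utils;
import org.apache.kafka.streams.processor.TaskId;
import org.apache.kafka.streams.processor.internals.assignment.AssignmentInfo;
import org.apache.kafka.streams.state.HostInfo;

public class AssignmentInfoSketch {
    public static void main(String[] args) {
        // active tasks are positional: the i-th assigned partition belongs to the i-th task id
        List<TaskId> activeTasks = Utils.mkList(new TaskId(0, 0), new TaskId(0, 1));
        Map<TaskId, Set<TopicPartition>> standbyTasks = Collections.singletonMap(
                new TaskId(1, 0), Utils.mkSet(new TopicPartition("topic1", 0)));
        AssignmentInfo info = new AssignmentInfo(activeTasks, standbyTasks,
                new HashMap<HostInfo, Set<TopicPartition>>());
        ByteBuffer encoded = info.encode();
        AssignmentInfo decoded = AssignmentInfo.decode(encoded);
        // onAssignment() reads exactly these fields to build the active/standby task maps
        System.out.println(decoded.activeTasks + " " + decoded.standbyTasks);
    }
}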
@Test public void shouldThrowExceptionIfApplicationServerConfigPortIsNotAnInteger() throws Exception { final Properties properties = configProps(); final String myEndPoint = "localhost:j87yhk"; properties.put(StreamsConfig.APPLICATION_SERVER_CONFIG, myEndPoint); final StreamsConfig config = new StreamsConfig(properties); final UUID uuid1 = UUID.randomUUID(); final String client1 = "client1"; final String applicationId = "application-id"; builder.setApplicationId(applicationId); final StreamThread streamThread = new StreamThread(builder, config, mockClientSupplier, applicationId, client1, uuid1, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0); try { partitionAssignor.configure(config.getConsumerConfigs(streamThread, applicationId, client1)); Assert.fail("expected an exception due to invalid config"); } catch (ConfigException e) { /* expected: the port in application.server is not a valid integer */ } }

@Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); final static int NOT_AVAILABLE; }
@Test public void shouldReturnEmptyClusterMetadataIfItHasntBeenBuilt() throws Exception { final Cluster cluster = partitionAssignor.clusterMetadata(); assertNotNull(cluster); }
Cluster clusterMetadata() { if (metadataWithInternalTopics == null) { return Cluster.empty(); } return metadataWithInternalTopics; }
StreamPartitionAssignor implements PartitionAssignor, Configurable { Cluster clusterMetadata() { if (metadataWithInternalTopics == null) { return Cluster.empty(); } return metadataWithInternalTopics; } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { Cluster clusterMetadata() { if (metadataWithInternalTopics == null) { return Cluster.empty(); } return metadataWithInternalTopics; } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { Cluster clusterMetadata() { if (metadataWithInternalTopics == null) { return Cluster.empty(); } return metadataWithInternalTopics; } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { Cluster clusterMetadata() { if (metadataWithInternalTopics == null) { return Cluster.empty(); } return metadataWithInternalTopics; } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); final static int NOT_AVAILABLE; }
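A minimal usage sketch of what the test above relies on, assuming partitionAssignor is the assignor instance from the test fixture: before any assignment has been computed, metadataWithInternalTopics is still null, so clusterMetadata() substitutes Cluster.empty() instead of returning null.
    final Cluster cluster = partitionAssignor.clusterMetadata();
    // Cluster.empty() is a non-null placeholder, so pre-assignment callers can query it safely.
    assert cluster != null && cluster.topics().isEmpty();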
@Test(expected = KafkaException.class) public void shouldThrowKafkaExceptionIfStreamThreadNotConfigured() throws Exception { partitionAssignor.configure(Collections.singletonMap(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1)); }
@Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); final static int NOT_AVAILABLE; }
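A hedged sketch of the configuration map configure() expects, using only the keys referenced in the method above; the test deliberately leaves out the internal StreamThread entry:
    final Map<String, Object> configs = new HashMap<>();
    configs.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1);
    // Without StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE in the map, configure() logs and
    // throws KafkaException("StreamThread is not specified"), which is exactly what the test asserts.
    partitionAssignor.configure(configs);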
@Test public void testDeleteConnector() throws Throwable { final Capture<Callback<Herder.Created<ConnectorInfo>>> cb = Capture.newInstance(); herder.deleteConnectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackResult(cb, null); PowerMock.replayAll(); connectorsResource.destroyConnector(CONNECTOR_NAME, FORWARD); PowerMock.verifyAll(); }
@DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); }
ConnectorsResource { @DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); } }
ConnectorsResource { @DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); } ConnectorsResource(Herder herder); }
ConnectorsResource { @DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
ConnectorsResource { @DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
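A sketch of the herder/callback handshake inside destroyConnector(), with an assumed connector name and an illustrative timeout; the FutureCallback completes with null on success, and completeOrForwardRequest(...) would otherwise forward the DELETE to the leader:
    final FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>();
    herder.deleteConnectorConfig("my-connector", cb);
    cb.get(90, TimeUnit.SECONDS);  // null on success; rethrows the herder's failure otherwise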
@Test(expected = KafkaException.class) public void shouldThrowKafkaExceptionIfStreamThreadConfigIsNotStreamThreadInstance() throws Exception { final Map<String, Object> config = new HashMap<>(); config.put(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG, 1); config.put(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE, "i am not a stream thread"); partitionAssignor.configure(config); }
@Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); }
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public void configure(Map<String, ?> configs) { numStandbyReplicas = (Integer) configs.get(StreamsConfig.NUM_STANDBY_REPLICAS_CONFIG); Object o = configs.get(StreamsConfig.InternalConfig.STREAM_THREAD_INSTANCE); if (o == null) { KafkaException ex = new KafkaException("StreamThread is not specified"); log.error(ex.getMessage(), ex); throw ex; } if (!(o instanceof StreamThread)) { KafkaException ex = new KafkaException(String.format("%s is not an instance of %s", o.getClass().getName(), StreamThread.class.getName())); log.error(ex.getMessage(), ex); throw ex; } streamThread = (StreamThread) o; streamThread.setPartitionAssignor(this); String userEndPoint = (String) configs.get(StreamsConfig.APPLICATION_SERVER_CONFIG); if (userEndPoint != null && !userEndPoint.isEmpty()) { try { String host = getHost(userEndPoint); Integer port = getPort(userEndPoint); if (host == null || port == null) throw new ConfigException(String.format("stream-thread [%s] Config %s isn't in the correct format. Expected a host:port pair" + " but received %s", streamThread.getName(), StreamsConfig.APPLICATION_SERVER_CONFIG, userEndPoint)); } catch (NumberFormatException nfe) { throw new ConfigException(String.format("stream-thread [%s] Invalid port supplied in %s for config %s", streamThread.getName(), userEndPoint, StreamsConfig.APPLICATION_SERVER_CONFIG)); } this.userEndPoint = userEndPoint; } internalTopicManager = new InternalTopicManager( new StreamsKafkaClient(this.streamThread.config), configs.containsKey(StreamsConfig.REPLICATION_FACTOR_CONFIG) ? (Integer) configs.get(StreamsConfig.REPLICATION_FACTOR_CONFIG) : 1, configs.containsKey(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) ? (Long) configs.get(StreamsConfig.WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG) : WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); this.copartitionedTopicsValidator = new CopartitionedTopicsValidator(streamThread.getName()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); final static int NOT_AVAILABLE; }
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionWhenKeyDeserializationFails() throws Exception { final byte[] key = Serdes.Long().serializer().serialize("foo", 1L); final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList( new ConsumerRecord<>("topic", 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, key, recordValue)); queue.addRawRecords(records); }
public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); SourceNode source(); TopicPartition partition(); int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords); StampedRecord poll(); int size(); boolean isEmpty(); long timestamp(); void clear(); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); SourceNode source(); TopicPartition partition(); int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords); StampedRecord poll(); int size(); boolean isEmpty(); long timestamp(); void clear(); }
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionWhenValueDeserializationFails() throws Exception { final byte[] value = Serdes.Long().serializer().serialize("foo", 1L); final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList( new ConsumerRecord<>("topic", 1, 1, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, value)); queue.addRawRecords(records); }
public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); SourceNode source(); TopicPartition partition(); int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords); StampedRecord poll(); int size(); boolean isEmpty(); long timestamp(); void clear(); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); SourceNode source(); TopicPartition partition(); int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords); StampedRecord poll(); int size(); boolean isEmpty(); long timestamp(); void clear(); }
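Why both deserialization tests above throw, in a short sketch: the queue's source node uses Integer deserializers, while the offending key or value bytes were produced by the Long serializer (8 bytes instead of 4), and RecordQueue surfaces that failure as the StreamsException the tests expect.
    final byte[] longBytes = Serdes.Long().serializer().serialize("topic", 1L);
    // Throws SerializationException because the payload is not 4 bytes; inside addRawRecords()
    // the equivalent failure is wrapped and rethrown as a StreamsException.
    Serdes.Integer().deserializer().deserialize("topic", longBytes);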
@Test(expected = StreamsException.class) public void shouldThrowOnNegativeTimestamp() { final List<ConsumerRecord<byte[], byte[]>> records = Collections.singletonList( new ConsumerRecord<>("topic", 1, 1, -1L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue)); final RecordQueue queue = new RecordQueue(new TopicPartition(topics[0], 1), new MockSourceNode<>(topics, intDeserializer, intDeserializer), new FailOnInvalidTimestamp()); queue.addRawRecords(records); }
public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); SourceNode source(); TopicPartition partition(); int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords); StampedRecord poll(); int size(); boolean isEmpty(); long timestamp(); void clear(); }
RecordQueue { public int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords) { for (ConsumerRecord<byte[], byte[]> rawRecord : rawRecords) { ConsumerRecord<Object, Object> record = recordDeserializer.deserialize(rawRecord); long timestamp = timestampExtractor.extract(record, timeTracker.get()); log.trace("Source node {} extracted timestamp {} for record {}", source.name(), timestamp, record); if (timestamp < 0) { continue; } StampedRecord stampedRecord = new StampedRecord(record, timestamp); fifoQueue.addLast(stampedRecord); timeTracker.addElement(stampedRecord); } long timestamp = timeTracker.get(); if (timestamp > partitionTime) partitionTime = timestamp; return size(); } RecordQueue(final TopicPartition partition, final SourceNode source, final TimestampExtractor timestampExtractor); SourceNode source(); TopicPartition partition(); int addRawRecords(Iterable<ConsumerRecord<byte[], byte[]>> rawRecords); StampedRecord poll(); int size(); boolean isEmpty(); long timestamp(); void clear(); }
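A hedged sketch of the two outcomes addRawRecords() can take for a record whose extracted timestamp is negative, matching the branch in the method above:
    final TimestampExtractor failing = new FailOnInvalidTimestamp();         // extract() itself throws StreamsException, as the test expects
    final TimestampExtractor skipping = (record, previousTimestamp) -> -1L;  // a negative return instead hits the 'timestamp < 0' branch and the record is silently dropped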
@SuppressWarnings("unchecked") @Test (expected = StreamsException.class) public void shouldThrowStreamsExceptionIfExceptionCaughtDuringInit() throws Exception { final ProcessorNode node = new ProcessorNode("name", new ExceptionalProcessor(), Collections.emptySet()); node.init(null); }
public void init(ProcessorContext context) { this.context = context; try { nodeMetrics = new NodeMetrics(context.metrics(), name, "task." + context.taskId()); nodeMetrics.metrics.measureLatencyNs(time, initDelegate, nodeMetrics.nodeCreationSensor); } catch (Exception e) { throw new StreamsException(String.format("failed to initialize processor %s", name), e); } }
ProcessorNode { public void init(ProcessorContext context) { this.context = context; try { nodeMetrics = new NodeMetrics(context.metrics(), name, "task." + context.taskId()); nodeMetrics.metrics.measureLatencyNs(time, initDelegate, nodeMetrics.nodeCreationSensor); } catch (Exception e) { throw new StreamsException(String.format("failed to initialize processor %s", name), e); } } }
ProcessorNode { public void init(ProcessorContext context) { this.context = context; try { nodeMetrics = new NodeMetrics(context.metrics(), name, "task." + context.taskId()); nodeMetrics.metrics.measureLatencyNs(time, initDelegate, nodeMetrics.nodeCreationSensor); } catch (Exception e) { throw new StreamsException(String.format("failed to initialize processor %s", name), e); } } ProcessorNode(String name); ProcessorNode(String name, Processor<K, V> processor, Set<String> stateStores); }
ProcessorNode { public void init(ProcessorContext context) { this.context = context; try { nodeMetrics = new NodeMetrics(context.metrics(), name, "task." + context.taskId()); nodeMetrics.metrics.measureLatencyNs(time, initDelegate, nodeMetrics.nodeCreationSensor); } catch (Exception e) { throw new StreamsException(String.format("failed to initialize processor %s", name), e); } } ProcessorNode(String name); ProcessorNode(String name, Processor<K, V> processor, Set<String> stateStores); final String name(); final Processor<K, V> processor(); final List<ProcessorNode<?, ?>> children(); void addChild(ProcessorNode<?, ?> child); void init(ProcessorContext context); void close(); void process(final K key, final V value); void punctuate(long timestamp); @Override String toString(); String toString(String indent); }
ProcessorNode { public void init(ProcessorContext context) { this.context = context; try { nodeMetrics = new NodeMetrics(context.metrics(), name, "task." + context.taskId()); nodeMetrics.metrics.measureLatencyNs(time, initDelegate, nodeMetrics.nodeCreationSensor); } catch (Exception e) { throw new StreamsException(String.format("failed to initialize processor %s", name), e); } } ProcessorNode(String name); ProcessorNode(String name, Processor<K, V> processor, Set<String> stateStores); final String name(); final Processor<K, V> processor(); final List<ProcessorNode<?, ?>> children(); void addChild(ProcessorNode<?, ?> child); void init(ProcessorContext context); void close(); void process(final K key, final V value); void punctuate(long timestamp); @Override String toString(); String toString(String indent); final Set<String> stateStores; }
@SuppressWarnings("unchecked") @Test (expected = StreamsException.class) public void shouldThrowStreamsExceptionIfExceptionCaughtDuringClose() throws Exception { final ProcessorNode node = new ProcessorNode("name", new ExceptionalProcessor(), Collections.emptySet()); node.close(); }
public void close() { try { nodeMetrics.metrics.measureLatencyNs(time, closeDelegate, nodeMetrics.nodeDestructionSensor); nodeMetrics.removeAllSensors(); } catch (Exception e) { throw new StreamsException(String.format("failed to close processor %s", name), e); } }
ProcessorNode { public void close() { try { nodeMetrics.metrics.measureLatencyNs(time, closeDelegate, nodeMetrics.nodeDestructionSensor); nodeMetrics.removeAllSensors(); } catch (Exception e) { throw new StreamsException(String.format("failed to close processor %s", name), e); } } }
ProcessorNode { public void close() { try { nodeMetrics.metrics.measureLatencyNs(time, closeDelegate, nodeMetrics.nodeDestructionSensor); nodeMetrics.removeAllSensors(); } catch (Exception e) { throw new StreamsException(String.format("failed to close processor %s", name), e); } } ProcessorNode(String name); ProcessorNode(String name, Processor<K, V> processor, Set<String> stateStores); }
ProcessorNode { public void close() { try { nodeMetrics.metrics.measureLatencyNs(time, closeDelegate, nodeMetrics.nodeDestructionSensor); nodeMetrics.removeAllSensors(); } catch (Exception e) { throw new StreamsException(String.format("failed to close processor %s", name), e); } } ProcessorNode(String name); ProcessorNode(String name, Processor<K, V> processor, Set<String> stateStores); final String name(); final Processor<K, V> processor(); final List<ProcessorNode<?, ?>> children(); void addChild(ProcessorNode<?, ?> child); void init(ProcessorContext context); void close(); void process(final K key, final V value); void punctuate(long timestamp); @Override String toString(); String toString(String indent); }
ProcessorNode { public void close() { try { nodeMetrics.metrics.measureLatencyNs(time, closeDelegate, nodeMetrics.nodeDestructionSensor); nodeMetrics.removeAllSensors(); } catch (Exception e) { throw new StreamsException(String.format("failed to close processor %s", name), e); } } ProcessorNode(String name); ProcessorNode(String name, Processor<K, V> processor, Set<String> stateStores); final String name(); final Processor<K, V> processor(); final List<ProcessorNode<?, ?>> children(); void addChild(ProcessorNode<?, ?> child); void init(ProcessorContext context); void close(); void process(final K key, final V value); void punctuate(long timestamp); @Override String toString(); String toString(String indent); final Set<String> stateStores; }
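An assumed shape for the ExceptionalProcessor used by the two lifecycle tests above (illustrative only): every callback throws, and ProcessorNode.init()/close() wrap whatever fails during the callback in a StreamsException that names the node.
    class ExceptionalProcessor implements Processor<Object, Object> {
        @Override public void init(final ProcessorContext context) { throw new RuntimeException("init failed"); }
        @Override public void process(final Object key, final Object value) { throw new RuntimeException("process failed"); }
        @Override public void punctuate(final long timestamp) { throw new RuntimeException("punctuate failed"); }
        @Override public void close() { throw new RuntimeException("close failed"); }
    }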
@Test public void shouldProvideTopicHeadersAndDataToKeyDeserializer() { final SourceNode<String, String> sourceNode = new MockSourceNode<>(new String[]{""}, new TheExtendedDeserializer(), new TheExtendedDeserializer()); final RecordHeaders headers = new RecordHeaders(); final String deserializeKey = sourceNode.deserializeKey("topic", headers, "data".getBytes(StandardCharsets.UTF_8)); assertThat(deserializeKey, is("topic" + headers + "data")); }
K deserializeKey(String topic, Headers headers, byte[] data) { return keyDeserializer.deserialize(topic, headers, data); }
SourceNode extends ProcessorNode<K, V> { K deserializeKey(String topic, Headers headers, byte[] data) { return keyDeserializer.deserialize(topic, headers, data); } }
SourceNode extends ProcessorNode<K, V> { K deserializeKey(String topic, Headers headers, byte[] data) { return keyDeserializer.deserialize(topic, headers, data); } SourceNode(String name, List<String> topics, TimestampExtractor timestampExtractor, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); SourceNode(String name, List<String> topics, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); }
SourceNode extends ProcessorNode<K, V> { K deserializeKey(String topic, Headers headers, byte[] data) { return keyDeserializer.deserialize(topic, headers, data); } SourceNode(String name, List<String> topics, TimestampExtractor timestampExtractor, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); SourceNode(String name, List<String> topics, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); @SuppressWarnings("unchecked") @Override void init(ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); String toString(String indent); TimestampExtractor getTimestampExtractor(); }
SourceNode extends ProcessorNode<K, V> { K deserializeKey(String topic, Headers headers, byte[] data) { return keyDeserializer.deserialize(topic, headers, data); } SourceNode(String name, List<String> topics, TimestampExtractor timestampExtractor, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); SourceNode(String name, List<String> topics, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); @SuppressWarnings("unchecked") @Override void init(ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); String toString(String indent); TimestampExtractor getTimestampExtractor(); }
@Test public void shouldProvideTopicHeadersAndDataToValueDeserializer() { final SourceNode<String, String> sourceNode = new MockSourceNode<>(new String[]{""}, new TheExtendedDeserializer(), new TheExtendedDeserializer()); final RecordHeaders headers = new RecordHeaders(); final String deserializedValue = sourceNode.deserializeValue("topic", headers, "data".getBytes(StandardCharsets.UTF_8)); assertThat(deserializedValue, is("topic" + headers + "data")); }
V deserializeValue(String topic, Headers headers, byte[] data) { return valDeserializer.deserialize(topic, headers, data); }
SourceNode extends ProcessorNode<K, V> { V deserializeValue(String topic, Headers headers, byte[] data) { return valDeserializer.deserialize(topic, headers, data); } }
SourceNode extends ProcessorNode<K, V> { V deserializeValue(String topic, Headers headers, byte[] data) { return valDeserializer.deserialize(topic, headers, data); } SourceNode(String name, List<String> topics, TimestampExtractor timestampExtractor, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); SourceNode(String name, List<String> topics, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); }
SourceNode extends ProcessorNode<K, V> { V deserializeValue(String topic, Headers headers, byte[] data) { return valDeserializer.deserialize(topic, headers, data); } SourceNode(String name, List<String> topics, TimestampExtractor timestampExtractor, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); SourceNode(String name, List<String> topics, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); @SuppressWarnings("unchecked") @Override void init(ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); String toString(String indent); TimestampExtractor getTimestampExtractor(); }
SourceNode extends ProcessorNode<K, V> { V deserializeValue(String topic, Headers headers, byte[] data) { return valDeserializer.deserialize(topic, headers, data); } SourceNode(String name, List<String> topics, TimestampExtractor timestampExtractor, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); SourceNode(String name, List<String> topics, Deserializer<K> keyDeserializer, Deserializer<V> valDeserializer); @SuppressWarnings("unchecked") @Override void init(ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); String toString(String indent); TimestampExtractor getTimestampExtractor(); }
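The record above exercises SourceNode.deserializeValue, which forwards topic, headers, and raw bytes to the value deserializer; the test's assertion implies its mock deserializer simply concatenates the three inputs. Below is a minimal sketch of such a headers-aware deserializer, using an illustrative HeaderAwareDeserializer stand-in rather than the actual interface TheExtendedDeserializer implements.

import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeaders;

public class EchoingDeserializerSketch {

    // Illustrative stand-in for a headers-aware deserializer contract.
    interface HeaderAwareDeserializer<T> {
        T deserialize(String topic, Headers headers, byte[] data);
    }

    // Echo topic + headers + payload so a caller can verify all three were forwarded,
    // mirroring the assertThat(deserializedValue, is("topic" + headers + "data")) check above.
    static final HeaderAwareDeserializer<String> ECHO = (topic, headers, data) ->
            topic + headers + new String(data, StandardCharsets.UTF_8);

    public static void main(final String[] args) {
        final Headers headers = new RecordHeaders();
        // Prints the topic, the Headers implementation's toString(), then the payload.
        System.out.println(ECHO.deserialize("topic", headers, "data".getBytes(StandardCharsets.UTF_8)));
    }
}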
@Test public void testGetStore() throws IOException { final MockStateStoreSupplier.MockStateStore mockStateStore = new MockStateStoreSupplier.MockStateStore(nonPersistentStoreName, false); final ProcessorStateManager stateMgr = new ProcessorStateManager( new TaskId(0, 1), noPartitions, false, stateDirectory, Collections.<String, String>emptyMap(), changelogReader, false); try { stateMgr.register(mockStateStore, true, mockStateStore.stateRestoreCallback); assertNull(stateMgr.getStore("noSuchStore")); assertEquals(mockStateStore, stateMgr.getStore(nonPersistentStoreName)); } finally { stateMgr.close(Collections.<TopicPartition, Long>emptyMap()); } }
@Override public StateStore getStore(final String name) { return stores.get(name); }
ProcessorStateManager implements StateManager { @Override public StateStore getStore(final String name) { return stores.get(name); } }
ProcessorStateManager implements StateManager { @Override public StateStore getStore(final String name) { return stores.get(name); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); }
ProcessorStateManager implements StateManager { @Override public StateStore getStore(final String name) { return stores.get(name); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); }
ProcessorStateManager implements StateManager { @Override public StateStore getStore(final String name) { return stores.get(name); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); static final String STATE_CHANGELOG_TOPIC_SUFFIX; }
@Test public void shouldThrowLockExceptionIfFailedToLockStateDirectory() throws Exception { final File taskDirectory = stateDirectory.directoryForTask(taskId); final FileChannel channel = FileChannel.open(new File(taskDirectory, StateDirectory.LOCK_FILE_NAME).toPath(), StandardOpenOption.CREATE, StandardOpenOption.WRITE); final FileLock lock = channel.lock(); try { new ProcessorStateManager( taskId, noPartitions, false, stateDirectory, Collections.<String, String>emptyMap(), changelogReader, false); fail("Should have thrown LockException"); } catch (final LockException e) { } finally { lock.release(); channel.close(); } }
@Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } }
ProcessorStateManager implements StateManager { @Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } } }
ProcessorStateManager implements StateManager { @Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); }
ProcessorStateManager implements StateManager { @Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); }
ProcessorStateManager implements StateManager { @Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); static final String STATE_CHANGELOG_TOPIC_SUFFIX; }
@Test public void testDeleteConnectorNotLeader() throws Throwable { final Capture<Callback<Herder.Created<ConnectorInfo>>> cb = Capture.newInstance(); herder.deleteConnectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackNotLeaderException(cb); EasyMock.expect(RestServer.httpRequest("http: .andReturn(new RestServer.HttpResponse<>(204, new HashMap<String, List<String>>(), null)); PowerMock.replayAll(); connectorsResource.destroyConnector(CONNECTOR_NAME, FORWARD); PowerMock.verifyAll(); }
@DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); }
ConnectorsResource { @DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); } }
ConnectorsResource { @DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); } ConnectorsResource(Herder herder); }
ConnectorsResource { @DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
ConnectorsResource { @DELETE @Path("/{connector}") public void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward); }
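destroyConnector above hands the delete to the herder and, when this worker is not the leader, forwards the request, which is why testDeleteConnectorNotLeader stubs a forwarded HTTP call answered with 204. The following is a hedged sketch of driving the same endpoint from a client using only the JDK; the host, port, and connector name are illustrative.

import java.net.HttpURLConnection;
import java.net.URL;

public class DeleteConnectorClientSketch {
    public static void main(final String[] args) throws Exception {
        // Illustrative worker address and connector name; 8083 is the usual Connect REST port.
        final URL url = new URL("http://localhost:8083/connectors/my-connector");
        final HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("DELETE");
        // A successful delete is answered with 204 No Content, matching the
        // RestServer.HttpResponse<>(204, ...) stubbed in the test above.
        System.out.println("Response code: " + connection.getResponseCode());
        connection.disconnect();
    }
}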
@Test public void shouldThrowIllegalArgumentExceptionIfStoreNameIsSameAsCheckpointFileName() throws Exception { final ProcessorStateManager stateManager = new ProcessorStateManager( taskId, noPartitions, false, stateDirectory, Collections.<String, String>emptyMap(), changelogReader, false); try { stateManager.register(new MockStateStoreSupplier.MockStateStore(ProcessorStateManager.CHECKPOINT_FILE_NAME, true), true, null); fail("should have thrown illegal argument exception when store name same as checkpoint file"); } catch (final IllegalArgumentException e) { } }
@Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); }
ProcessorStateManager implements StateManager { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); } }
ProcessorStateManager implements StateManager { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); }
ProcessorStateManager implements StateManager { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); }
ProcessorStateManager implements StateManager { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); static final String STATE_CHANGELOG_TOPIC_SUFFIX; }
@Test public void shouldThrowIllegalArgumentExceptionOnRegisterWhenStoreHasAlreadyBeenRegistered() throws Exception { final ProcessorStateManager stateManager = new ProcessorStateManager( taskId, noPartitions, false, stateDirectory, Collections.<String, String>emptyMap(), changelogReader, false); stateManager.register(mockStateStore, false, null); try { stateManager.register(mockStateStore, false, null); fail("should have thrown illegal argument exception when store with same name already registered"); } catch (final IllegalArgumentException e) { } }
@Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); }
ProcessorStateManager implements StateManager { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); } }
ProcessorStateManager implements StateManager { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); }
ProcessorStateManager implements StateManager { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); }
ProcessorStateManager implements StateManager { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { log.debug("{} Registering state store {} to its state manager", logPrefix, store.name()); if (store.name().equals(CHECKPOINT_FILE_NAME)) { throw new IllegalArgumentException(String.format("%s Illegal store name: %s", logPrefix, CHECKPOINT_FILE_NAME)); } if (stores.containsKey(store.name())) { throw new IllegalArgumentException(String.format("%s Store %s has already been registered.", logPrefix, store.name())); } final String topic = storeToChangelogTopic.get(store.name()); if (topic == null) { stores.put(store.name(), store); return; } final TopicPartition storePartition = new TopicPartition(topic, getPartition(topic)); changelogReader.validatePartitionExists(storePartition, store.name()); if (isStandby) { if (store.persistent()) { log.trace("{} Preparing standby replica of persistent state store {} with changelog topic {}", logPrefix, store.name(), topic); restoreCallbacks.put(topic, stateRestoreCallback); } } else { log.trace("{} Restoring state store {} from changelog topic {}", logPrefix, store.name(), topic); final StateRestorer restorer = new StateRestorer(storePartition, stateRestoreCallback, checkpointedOffsets.get(storePartition), offsetLimit(storePartition), store.persistent()); changelogReader.register(restorer); } stores.put(store.name(), store); } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); static final String STATE_CHANGELOG_TOPIC_SUFFIX; }
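The two registration tests above hit the name checks at the top of ProcessorStateManager.register: the checkpoint file name is rejected, and so is any store name that was already registered. Here is a stripped-down sketch of just that guard logic; the ".checkpoint" literal is the assumed value of the real CHECKPOINT_FILE_NAME constant, and this is not the real state manager.

import java.util.HashMap;
import java.util.Map;

public class RegistrationGuardSketch {
    static final String CHECKPOINT_FILE_NAME = ".checkpoint"; // assumed value of the real constant

    private final Map<String, Object> stores = new HashMap<>();

    public void register(final String storeName, final Object store) {
        if (CHECKPOINT_FILE_NAME.equals(storeName)) {
            // A store may not shadow the checkpoint file kept in the task's state directory.
            throw new IllegalArgumentException("Illegal store name: " + storeName);
        }
        if (stores.containsKey(storeName)) {
            // Each store name may be registered at most once per task.
            throw new IllegalArgumentException("Store " + storeName + " has already been registered.");
        }
        stores.put(storeName, store);
    }
}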
@Test public void shouldDeleteCheckpointFileOnCreationIfEosEnabled() throws Exception { checkpoint.write(Collections.<TopicPartition, Long>emptyMap()); assertTrue(checkpointFile.exists()); ProcessorStateManager stateManager = null; try { stateManager = new ProcessorStateManager( taskId, noPartitions, false, stateDirectory, Collections.<String, String>emptyMap(), changelogReader, true); assertFalse(checkpointFile.exists()); } finally { if (stateManager != null) { stateManager.close(null); } } }
@Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } }
ProcessorStateManager implements StateManager { @Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } } }
ProcessorStateManager implements StateManager { @Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); }
ProcessorStateManager implements StateManager { @Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); }
ProcessorStateManager implements StateManager { @Override public void close(final Map<TopicPartition, Long> ackedOffsets) throws ProcessorStateException { RuntimeException firstException = null; try { if (!stores.isEmpty()) { log.debug("{} Closing its state manager and all the registered state stores", logPrefix); for (final Map.Entry<String, StateStore> entry : stores.entrySet()) { log.debug("{} Closing storage engine {}", logPrefix, entry.getKey()); try { entry.getValue().close(); } catch (final Exception e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to close state store %s", logPrefix, entry.getKey()), e); } log.error("{} Failed to close state store {}: ", logPrefix, entry.getKey(), e); } } if (ackedOffsets != null) { checkpoint(ackedOffsets); } } } finally { try { stateDirectory.unlock(taskId); } catch (final IOException e) { if (firstException == null) { firstException = new ProcessorStateException(String.format("%s Failed to release state dir lock", logPrefix), e); } log.error("{} Failed to release state dir lock: ", logPrefix, e); } } if (firstException != null) { throw firstException; } } ProcessorStateManager(final TaskId taskId, final Collection<TopicPartition> sources, final boolean isStandby, final StateDirectory stateDirectory, final Map<String, String> storeToChangelogTopic, final ChangelogReader changelogReader, final boolean eosEnabled); static String storeChangelogTopic(final String applicationId, final String storeName); @Override File baseDir(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override Map<TopicPartition, Long> checkpointed(); @Override StateStore getStore(final String name); @Override void flush(); @Override void close(final Map<TopicPartition, Long> ackedOffsets); @Override void checkpoint(final Map<TopicPartition, Long> ackedOffsets); @Override StateStore getGlobalStore(final String name); static final String STATE_CHANGELOG_TOPIC_SUFFIX; }
@Test public void shouldReturnNotKnownTimestampWhenNoRecordsEverAdded() throws Exception { assertThat(tracker.get(), equalTo(TimestampTracker.NOT_KNOWN)); }
public long get() { Stamped<E> stamped = ascendingSubsequence.peekFirst(); if (stamped == null) return lastKnownTime; else return stamped.timestamp; }
MinTimestampTracker implements TimestampTracker<E> { public long get() { Stamped<E> stamped = ascendingSubsequence.peekFirst(); if (stamped == null) return lastKnownTime; else return stamped.timestamp; } }
MinTimestampTracker implements TimestampTracker<E> { public long get() { Stamped<E> stamped = ascendingSubsequence.peekFirst(); if (stamped == null) return lastKnownTime; else return stamped.timestamp; } }
MinTimestampTracker implements TimestampTracker<E> { public long get() { Stamped<E> stamped = ascendingSubsequence.peekFirst(); if (stamped == null) return lastKnownTime; else return stamped.timestamp; } void addElement(final Stamped<E> elem); void removeElement(final Stamped<E> elem); int size(); long get(); }
MinTimestampTracker implements TimestampTracker<E> { public long get() { Stamped<E> stamped = ascendingSubsequence.peekFirst(); if (stamped == null) return lastKnownTime; else return stamped.timestamp; } void addElement(final Stamped<E> elem); void removeElement(final Stamped<E> elem); int size(); long get(); }
@Test public void shouldIgnoreNullRecordOnRemove() throws Exception { tracker.removeElement(null); }
public void removeElement(final Stamped<E> elem) { if (elem == null) { return; } if (ascendingSubsequence.peekFirst() == elem) { ascendingSubsequence.removeFirst(); } if (ascendingSubsequence.isEmpty()) { lastKnownTime = elem.timestamp; } }
MinTimestampTracker implements TimestampTracker<E> { public void removeElement(final Stamped<E> elem) { if (elem == null) { return; } if (ascendingSubsequence.peekFirst() == elem) { ascendingSubsequence.removeFirst(); } if (ascendingSubsequence.isEmpty()) { lastKnownTime = elem.timestamp; } } }
MinTimestampTracker implements TimestampTracker<E> { public void removeElement(final Stamped<E> elem) { if (elem == null) { return; } if (ascendingSubsequence.peekFirst() == elem) { ascendingSubsequence.removeFirst(); } if (ascendingSubsequence.isEmpty()) { lastKnownTime = elem.timestamp; } } }
MinTimestampTracker implements TimestampTracker<E> { public void removeElement(final Stamped<E> elem) { if (elem == null) { return; } if (ascendingSubsequence.peekFirst() == elem) { ascendingSubsequence.removeFirst(); } if (ascendingSubsequence.isEmpty()) { lastKnownTime = elem.timestamp; } } void addElement(final Stamped<E> elem); void removeElement(final Stamped<E> elem); int size(); long get(); }
MinTimestampTracker implements TimestampTracker<E> { public void removeElement(final Stamped<E> elem) { if (elem == null) { return; } if (ascendingSubsequence.peekFirst() == elem) { ascendingSubsequence.removeFirst(); } if (ascendingSubsequence.isEmpty()) { lastKnownTime = elem.timestamp; } } void addElement(final Stamped<E> elem); void removeElement(final Stamped<E> elem); int size(); long get(); }
@Test(expected = NullPointerException.class) public void shouldThrowNullPointerExceptionWhenTryingToAddNullElement() throws Exception { tracker.addElement(null); }
public void addElement(final Stamped<E> elem) { if (elem == null) throw new NullPointerException(); Stamped<E> maxElem = ascendingSubsequence.peekLast(); while (maxElem != null && maxElem.timestamp >= elem.timestamp) { ascendingSubsequence.removeLast(); maxElem = ascendingSubsequence.peekLast(); } ascendingSubsequence.offerLast(elem); }
MinTimestampTracker implements TimestampTracker<E> { public void addElement(final Stamped<E> elem) { if (elem == null) throw new NullPointerException(); Stamped<E> maxElem = ascendingSubsequence.peekLast(); while (maxElem != null && maxElem.timestamp >= elem.timestamp) { ascendingSubsequence.removeLast(); maxElem = ascendingSubsequence.peekLast(); } ascendingSubsequence.offerLast(elem); } }
MinTimestampTracker implements TimestampTracker<E> { public void addElement(final Stamped<E> elem) { if (elem == null) throw new NullPointerException(); Stamped<E> maxElem = ascendingSubsequence.peekLast(); while (maxElem != null && maxElem.timestamp >= elem.timestamp) { ascendingSubsequence.removeLast(); maxElem = ascendingSubsequence.peekLast(); } ascendingSubsequence.offerLast(elem); } }
MinTimestampTracker implements TimestampTracker<E> { public void addElement(final Stamped<E> elem) { if (elem == null) throw new NullPointerException(); Stamped<E> maxElem = ascendingSubsequence.peekLast(); while (maxElem != null && maxElem.timestamp >= elem.timestamp) { ascendingSubsequence.removeLast(); maxElem = ascendingSubsequence.peekLast(); } ascendingSubsequence.offerLast(elem); } void addElement(final Stamped<E> elem); void removeElement(final Stamped<E> elem); int size(); long get(); }
MinTimestampTracker implements TimestampTracker<E> { public void addElement(final Stamped<E> elem) { if (elem == null) throw new NullPointerException(); Stamped<E> maxElem = ascendingSubsequence.peekLast(); while (maxElem != null && maxElem.timestamp >= elem.timestamp) { ascendingSubsequence.removeLast(); maxElem = ascendingSubsequence.peekLast(); } ascendingSubsequence.offerLast(elem); } void addElement(final Stamped<E> elem); void removeElement(final Stamped<E> elem); int size(); long get(); }
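The MinTimestampTracker records above show the class keeping an ascending subsequence of buffered elements: addElement evicts tail entries with greater-or-equal timestamps, so get() can return the minimum buffered timestamp in constant time and fall back to the last known time once the buffer drains. Below is a self-contained simplification of that logic, using plain long timestamps instead of Stamped<E> elements; the NOT_KNOWN sentinel value of -1 is an assumption mirroring TimestampTracker.NOT_KNOWN.

import java.util.ArrayDeque;
import java.util.Deque;

public class SimpleMinTimestampTracker {
    public static final long NOT_KNOWN = -1L; // assumed sentinel, mirroring TimestampTracker.NOT_KNOWN

    private final Deque<Long> ascending = new ArrayDeque<>();
    private long lastKnownTime = NOT_KNOWN;

    public void addElement(final long timestamp) {
        // Drop buffered timestamps that can no longer be the minimum,
        // keeping the deque strictly ascending from head to tail.
        while (!ascending.isEmpty() && ascending.peekLast() >= timestamp) {
            ascending.removeLast();
        }
        ascending.offerLast(timestamp);
    }

    public void removeElement(final long timestamp) {
        // Only the current head can be removed; anything else was already evicted on add.
        if (!ascending.isEmpty() && ascending.peekFirst() == timestamp) {
            ascending.removeFirst();
        }
        if (ascending.isEmpty()) {
            lastKnownTime = timestamp;
        }
    }

    public long get() {
        return ascending.isEmpty() ? lastKnownTime : ascending.peekFirst();
    }

    public static void main(final String[] args) {
        final SimpleMinTimestampTracker tracker = new SimpleMinTimestampTracker();
        System.out.println(tracker.get()); // -1 (NOT_KNOWN): nothing added yet
        tracker.addElement(5L);
        tracker.addElement(3L);            // evicts 5, since 5 >= 3
        tracker.addElement(7L);
        System.out.println(tracker.get()); // 3: smallest buffered timestamp
        tracker.removeElement(3L);
        System.out.println(tracker.get()); // 7
    }
}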
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfKeyFailsToDeserialize() throws Exception { final SourceNodeRecordDeserializer recordDeserializer = new SourceNodeRecordDeserializer( new TheSourceNode(true, false)); recordDeserializer.deserialize(rawRecord); }
@Override public ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) { final Object key; try { key = sourceNode.deserializeKey(rawRecord.topic(), rawRecord.headers(), rawRecord.key()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize key for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } final Object value; try { value = sourceNode.deserializeValue(rawRecord.topic(), rawRecord.headers(), rawRecord.value()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize value for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), rawRecord.timestamp(), TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(), key, value); }
SourceNodeRecordDeserializer implements RecordDeserializer { @Override public ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) { final Object key; try { key = sourceNode.deserializeKey(rawRecord.topic(), rawRecord.headers(), rawRecord.key()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize key for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } final Object value; try { value = sourceNode.deserializeValue(rawRecord.topic(), rawRecord.headers(), rawRecord.value()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize value for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), rawRecord.timestamp(), TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(), key, value); } }
SourceNodeRecordDeserializer implements RecordDeserializer { @Override public ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) { final Object key; try { key = sourceNode.deserializeKey(rawRecord.topic(), rawRecord.headers(), rawRecord.key()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize key for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } final Object value; try { value = sourceNode.deserializeValue(rawRecord.topic(), rawRecord.headers(), rawRecord.value()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize value for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), rawRecord.timestamp(), TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(), key, value); } SourceNodeRecordDeserializer(final SourceNode sourceNode); }
SourceNodeRecordDeserializer implements RecordDeserializer { @Override public ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) { final Object key; try { key = sourceNode.deserializeKey(rawRecord.topic(), rawRecord.headers(), rawRecord.key()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize key for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } final Object value; try { value = sourceNode.deserializeValue(rawRecord.topic(), rawRecord.headers(), rawRecord.value()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize value for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), rawRecord.timestamp(), TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(), key, value); } SourceNodeRecordDeserializer(final SourceNode sourceNode); @Override ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord); }
SourceNodeRecordDeserializer implements RecordDeserializer { @Override public ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord) { final Object key; try { key = sourceNode.deserializeKey(rawRecord.topic(), rawRecord.headers(), rawRecord.key()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize key for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } final Object value; try { value = sourceNode.deserializeValue(rawRecord.topic(), rawRecord.headers(), rawRecord.value()); } catch (Exception e) { throw new StreamsException(format("Failed to deserialize value for record. topic=%s, partition=%d, offset=%d", rawRecord.topic(), rawRecord.partition(), rawRecord.offset()), e); } return new ConsumerRecord<>(rawRecord.topic(), rawRecord.partition(), rawRecord.offset(), rawRecord.timestamp(), TimestampType.CREATE_TIME, rawRecord.checksum(), rawRecord.serializedKeySize(), rawRecord.serializedValueSize(), key, value); } SourceNodeRecordDeserializer(final SourceNode sourceNode); @Override ConsumerRecord<Object, Object> deserialize(final ConsumerRecord<byte[], byte[]> rawRecord); }
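deserialize() above wraps any key or value deserialization failure in a StreamsException that carries the record's topic, partition, and offset, which is what shouldThrowStreamsExceptionIfKeyFailsToDeserialize expects. The sketch below isolates that wrap-and-rethrow-with-context pattern; FailingParser-style names and ParsingException are illustrative stand-ins, not the Kafka classes.

public class WrapWithContextSketch {

    // Illustrative stand-in for StreamsException.
    static class ParsingException extends RuntimeException {
        ParsingException(final String message, final Throwable cause) {
            super(message, cause);
        }
    }

    static int parseIntField(final String topic, final long offset, final String raw) {
        try {
            return Integer.parseInt(raw);
        } catch (final Exception e) {
            // Re-throw with the record coordinates, as deserialize() does with
            // topic, partition, and offset, so the failing record can be located.
            throw new ParsingException(
                    String.format("Failed to parse record. topic=%s, offset=%d", topic, offset), e);
        }
    }

    public static void main(final String[] args) {
        System.out.println(parseIntField("orders", 42L, "17")); // prints 17
        parseIntField("orders", 43L, "not-a-number");           // throws ParsingException with context
    }
}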