Dataset columns (all string-valued), with the reported minimum and maximum string lengths:

  column            type     min length   max length
  target            string   20           113k
  src_fm            string   11           86.3k
  src_fm_fc         string   21           86.4k
  src_fm_fc_co      string   30           86.4k
  src_fm_fc_ms      string   42           86.8k
  src_fm_fc_ms_ff   string   43           86.8k
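Judging from the rows that follow, each record pairs a JUnit test (target) with its focal method under progressively richer context: src_fm is the focal method alone, src_fm_fc wraps it in its declaring class, src_fm_fc_co adds the constructor signature, src_fm_fc_ms adds the other method signatures, and src_fm_fc_ms_ff presumably adds field declarations (the last two columns happen to be identical in the rows shown here). The sketch below only illustrates that row layout; the type and field names are hypothetical and not part of the dataset.

```java
// Hypothetical container for one dataset row; names are illustrative only.
public record TestFocalPair(
        String target,       // the @Test method that exercises the focal method
        String srcFm,        // focal method body on its own
        String srcFmFc,      // focal method wrapped in its declaring class
        String srcFmFcCo,    // ... plus the class constructor signature
        String srcFmFcMs,    // ... plus signatures of the class's other methods
        String srcFmFcMsFf   // ... plus field declarations (assumed)
) {}
```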
@Test(expected = CommitFailedException.class) public void testCommitOffsetUnknownMemberId() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.UNKNOWN_MEMBER_ID))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
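For readability, here is the focal method from the row above (ConsumerCoordinator.commitOffsetsSync) re-indented with comments added; the code itself is unchanged from the cell value.

```java
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) {
    // Flush callbacks from any async commits that have already completed.
    invokeCompletedOffsetCommitCallbacks();

    if (offsets.isEmpty())
        return true;

    long now = time.milliseconds();
    long startMs = now;
    long remainingMs = timeoutMs;
    do {
        // (Re)discover the group coordinator if it is currently unknown.
        if (coordinatorUnknown()) {
            if (!ensureCoordinatorReady(now, remainingMs))
                return false;
            remainingMs = timeoutMs - (time.milliseconds() - startMs);
        }

        // Send the OffsetCommit request and block until it completes or the budget runs out.
        RequestFuture<Void> future = sendOffsetCommitRequest(offsets);
        client.poll(future, remainingMs);

        if (future.succeeded()) {
            if (interceptors != null)
                interceptors.onCommit(offsets);
            return true;
        }

        // Non-retriable errors (e.g. the CommitFailedException cases tested above) are rethrown.
        if (!future.isRetriable())
            throw future.exception();

        // Retriable error: back off and try again until the timeout is exhausted.
        time.sleep(retryBackoffMs);

        now = time.milliseconds();
        remainingMs = timeoutMs - (now - startMs);
    } while (remainingMs > 0);

    return false;
}
```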
@Test(expected = CommitFailedException.class) public void testCommitOffsetRebalanceInProgress() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.REBALANCE_IN_PROGRESS))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
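The target test in the row above follows the same mock-driven pattern as the other commit tests; it is shown again below, re-indented and commented, with the logic unchanged.

```java
@Test(expected = CommitFailedException.class)
public void testCommitOffsetRebalanceInProgress() {
    // Let the consumer discover the (mock) group coordinator.
    client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE));
    coordinator.ensureCoordinatorReady();

    // The broker answers the OffsetCommit request with REBALANCE_IN_PROGRESS,
    // which the coordinator surfaces as a non-retriable CommitFailedException.
    client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.REBALANCE_IN_PROGRESS)));
    coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE);
}
```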
@Test(expected = KafkaException.class) public void testCommitOffsetSyncCallbackWithNonRetriableException() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.UNKNOWN)), false); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test(expected = IllegalArgumentException.class) public void testCommitSyncNegativeOffset() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(-1L)), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testRefreshOffset() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L)); coordinator.refreshCommittedOffsetsIfNeeded(); assertFalse(subscriptions.refreshCommitsNeeded()); assertEquals(100L, subscriptions.committed(t1p).offset()); }
public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
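For readability, the focal method from the row above (ConsumerCoordinator.refreshCommittedOffsetsIfNeeded) is shown again below, re-indented and commented; the logic is unchanged.

```java
public void refreshCommittedOffsetsIfNeeded() {
    if (subscriptions.refreshCommitsNeeded()) {
        // Fetch the last committed offsets for every assigned partition from the coordinator.
        Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions());
        for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) {
            TopicPartition tp = entry.getKey();
            // The assignment may have changed while the fetch was in flight, so only
            // record committed offsets for partitions that are still assigned.
            if (subscriptions.isAssigned(tp))
                this.subscriptions.committed(tp, entry.getValue());
        }
        // Clear the refresh flag so refreshCommitsNeeded() returns false afterwards.
        this.subscriptions.commitsRefreshed();
    }
}
```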
@Test public void testRefreshOffsetLoadInProgress() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L)); coordinator.refreshCommittedOffsetsIfNeeded(); assertFalse(subscriptions.refreshCommitsNeeded()); assertEquals(100L, subscriptions.committed(t1p).offset()); }
public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testRefreshOffsetsGroupNotAuthorized() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(Errors.GROUP_AUTHORIZATION_FAILED)); try { coordinator.refreshCommittedOffsetsIfNeeded(); fail("Expected group authorization error"); } catch (GroupAuthorizationException e) { assertEquals(groupId, e.groupId()); } }
public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test(expected = KafkaException.class) public void testRefreshOffsetUnknownTopicOrPartition() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(t1p, Errors.UNKNOWN_TOPIC_OR_PARTITION, "", 100L)); coordinator.refreshCommittedOffsetsIfNeeded(); }
public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testRefreshOffsetNotCoordinatorForConsumer() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR)); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", 100L)); coordinator.refreshCommittedOffsetsIfNeeded(); assertFalse(subscriptions.refreshCommitsNeeded()); assertEquals(100L, subscriptions.committed(t1p).offset()); }
public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testRefreshOffsetWithNoFetchableOffsets() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); subscriptions.assignFromUser(singleton(t1p)); subscriptions.needRefreshCommits(); client.prepareResponse(offsetFetchResponse(t1p, Errors.NONE, "", -1L)); coordinator.refreshCommittedOffsetsIfNeeded(); assertFalse(subscriptions.refreshCommitsNeeded()); assertEquals(null, subscriptions.committed(t1p)); }
public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void refreshCommittedOffsetsIfNeeded() { if (subscriptions.refreshCommitsNeeded()) { Map<TopicPartition, OffsetAndMetadata> offsets = fetchCommittedOffsets(subscriptions.assignedPartitions()); for (Map.Entry<TopicPartition, OffsetAndMetadata> entry : offsets.entrySet()) { TopicPartition tp = entry.getKey(); if (subscriptions.isAssigned(tp)) this.subscriptions.committed(tp, entry.getValue()); } this.subscriptions.commitsRefreshed(); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
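A minimal, self-contained sketch of the filtering step inside refreshCommittedOffsetsIfNeeded above: committed offsets fetched from the coordinator are applied only to partitions that are still assigned, so a concurrent rebalance cannot resurrect state for a revoked partition. The class name, the String partition keys, and the hard-coded offsets are all illustrative, not part of the Kafka API.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class RefreshCommittedSketch {
    public static void main(String[] args) {
        // Offsets returned by a hypothetical fetch of committed positions.
        Map<String, Long> fetched = new HashMap<>();
        fetched.put("t1-0", 100L);
        fetched.put("t1-1", 200L);

        // Partitions currently assigned; "t1-1" was revoked in the meantime.
        Set<String> assigned = new HashSet<>();
        assigned.add("t1-0");

        // Local view of committed positions, updated only for still-assigned partitions.
        Map<String, Long> committed = new HashMap<>();
        for (Map.Entry<String, Long> entry : fetched.entrySet()) {
            if (assigned.contains(entry.getKey()))
                committed.put(entry.getKey(), entry.getValue());
        }

        System.out.println(committed); // prints {t1-0=100}
    }
}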
@Test public void timestampToConnectWithDefaultValue() { Schema schema = Timestamp.builder().defaultValue(new java.util.Date(42)).schema(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1, \"default\": 42 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(new java.util.Date(42), schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
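As an illustration of the envelope check performed in toConnectData above, the following standalone sketch parses a message with Jackson and applies the same structural test: with schemas enabled, the value must be a JSON object with exactly the two fields "schema" and "payload". Jackson is assumed to be on the classpath; the class name and message are made up for the example.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class EnvelopeCheckSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", "
                + "\"version\": 1, \"default\": 42 }, \"payload\": null }";
        JsonNode jsonValue = mapper.readTree(msg);

        // Same structural test as the converter: an object with exactly "schema" and "payload".
        boolean validEnvelope = jsonValue != null
                && jsonValue.isObject()
                && jsonValue.size() == 2
                && jsonValue.has("schema")
                && jsonValue.has("payload");

        System.out.println("valid envelope: " + validEnvelope); // prints true
    }
}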
@Test public void testProtocolMetadataOrder() { RoundRobinAssignor roundRobin = new RoundRobinAssignor(); RangeAssignor range = new RangeAssignor(); try (Metrics metrics = new Metrics(time)) { ConsumerCoordinator coordinator = buildCoordinator(metrics, Arrays.<PartitionAssignor>asList(roundRobin, range), ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true); List<ProtocolMetadata> metadata = coordinator.metadata(); assertEquals(2, metadata.size()); assertEquals(roundRobin.name(), metadata.get(0).name()); assertEquals(range.name(), metadata.get(1).name()); } try (Metrics metrics = new Metrics(time)) { ConsumerCoordinator coordinator = buildCoordinator(metrics, Arrays.<PartitionAssignor>asList(range, roundRobin), ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, false, true); List<ProtocolMetadata> metadata = coordinator.metadata(); assertEquals(2, metadata.size()); assertEquals(range.name(), metadata.get(0).name()); assertEquals(roundRobin.name(), metadata.get(1).name()); } }
@Override public List<ProtocolMetadata> metadata() { this.joinedSubscription = subscriptions.subscription(); List<ProtocolMetadata> metadataList = new ArrayList<>(); for (PartitionAssignor assignor : assignors) { Subscription subscription = assignor.subscription(joinedSubscription); ByteBuffer metadata = ConsumerProtocol.serializeSubscription(subscription); metadataList.add(new ProtocolMetadata(assignor.name(), metadata)); } return metadataList; }
ConsumerCoordinator extends AbstractCoordinator { @Override public List<ProtocolMetadata> metadata() { this.joinedSubscription = subscriptions.subscription(); List<ProtocolMetadata> metadataList = new ArrayList<>(); for (PartitionAssignor assignor : assignors) { Subscription subscription = assignor.subscription(joinedSubscription); ByteBuffer metadata = ConsumerProtocol.serializeSubscription(subscription); metadataList.add(new ProtocolMetadata(assignor.name(), metadata)); } return metadataList; } }
ConsumerCoordinator extends AbstractCoordinator { @Override public List<ProtocolMetadata> metadata() { this.joinedSubscription = subscriptions.subscription(); List<ProtocolMetadata> metadataList = new ArrayList<>(); for (PartitionAssignor assignor : assignors) { Subscription subscription = assignor.subscription(joinedSubscription); ByteBuffer metadata = ConsumerProtocol.serializeSubscription(subscription); metadataList.add(new ProtocolMetadata(assignor.name(), metadata)); } return metadataList; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { @Override public List<ProtocolMetadata> metadata() { this.joinedSubscription = subscriptions.subscription(); List<ProtocolMetadata> metadataList = new ArrayList<>(); for (PartitionAssignor assignor : assignors) { Subscription subscription = assignor.subscription(joinedSubscription); ByteBuffer metadata = ConsumerProtocol.serializeSubscription(subscription); metadataList.add(new ProtocolMetadata(assignor.name(), metadata)); } return metadataList; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public List<ProtocolMetadata> metadata() { this.joinedSubscription = subscriptions.subscription(); List<ProtocolMetadata> metadataList = new ArrayList<>(); for (PartitionAssignor assignor : assignors) { Subscription subscription = assignor.subscription(joinedSubscription); ByteBuffer metadata = ConsumerProtocol.serializeSubscription(subscription); metadataList.add(new ProtocolMetadata(assignor.name(), metadata)); } return metadataList; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
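The ordering checked by testProtocolMetadataOrder is what lets a client express assignor preference. On the public consumer API the same ordering is driven by the partition.assignment.strategy setting; the sketch below only builds a configuration (broker address and group id are placeholders) and assumes kafka-clients on the classpath, so treat it as an illustration rather than an excerpt from the document.

import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;

public class AssignorPreferenceSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group
        // Assignors are listed in preference order; the coordinator serializes their
        // subscriptions in that same order, which is what the test above asserts.
        props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG,
                "org.apache.kafka.clients.consumer.RoundRobinAssignor,"
                + "org.apache.kafka.clients.consumer.RangeAssignor");
        System.out.println(props);
    }
}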
@Test public void send() { client.prepareResponse(heartbeatResponse(Errors.NONE)); RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat()); assertEquals(1, consumerClient.pendingRequestCount()); assertEquals(1, consumerClient.pendingRequestCount(node)); assertFalse(future.isDone()); consumerClient.poll(future); assertTrue(future.isDone()); assertTrue(future.succeeded()); ClientResponse clientResponse = future.value(); HeartbeatResponse response = (HeartbeatResponse) clientResponse.responseBody(); assertEquals(Errors.NONE, response.error()); }
public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) { long now = time.milliseconds(); RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler(); ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true, completionHandler); unsent.put(node, clientRequest); client.wakeup(); return completionHandler.future; }
ConsumerNetworkClient implements Closeable { public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) { long now = time.milliseconds(); RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler(); ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true, completionHandler); unsent.put(node, clientRequest); client.wakeup(); return completionHandler.future; } }
ConsumerNetworkClient implements Closeable { public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) { long now = time.milliseconds(); RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler(); ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true, completionHandler); unsent.put(node, clientRequest); client.wakeup(); return completionHandler.future; } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); }
ConsumerNetworkClient implements Closeable { public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) { long now = time.milliseconds(); RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler(); ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true, completionHandler); unsent.put(node, clientRequest); client.wakeup(); return completionHandler.future; } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
ConsumerNetworkClient implements Closeable { public RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder) { long now = time.milliseconds(); RequestFutureCompletionHandler completionHandler = new RequestFutureCompletionHandler(); ClientRequest clientRequest = client.newClientRequest(node.idString(), requestBuilder, now, true, completionHandler); unsent.put(node, clientRequest); client.wakeup(); return completionHandler.future; } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
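The send/poll pair above follows a request-future pattern: send enqueues the request and returns a future, and the caller drives poll until the future completes. A rough standalone analogue using the JDK's CompletableFuture (not Kafka's RequestFuture type, and with invented names):

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.concurrent.CompletableFuture;

public class RequestFutureSketch {
    // Requests queued by send() and completed later by poll().
    private final Queue<CompletableFuture<String>> unsent = new ArrayDeque<>();

    public CompletableFuture<String> send(String request) {
        CompletableFuture<String> future = new CompletableFuture<>();
        unsent.add(future);            // queue the request without blocking
        return future;                 // caller decides when to wait
    }

    public void poll() {
        CompletableFuture<String> pending = unsent.poll();
        if (pending != null)
            pending.complete("response"); // stand-in for reading a network response
    }

    public static void main(String[] args) {
        RequestFutureSketch client = new RequestFutureSketch();
        CompletableFuture<String> future = client.send("heartbeat");
        while (!future.isDone())
            client.poll();             // drive I/O until the future completes
        System.out.println(future.join());
    }
}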
@Test public void doNotBlockIfPollConditionIsSatisfied() { NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class); ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, 100, 1000); EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(0L), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList()); EasyMock.replay(mockNetworkClient); consumerClient.poll(Long.MAX_VALUE, time.milliseconds(), new ConsumerNetworkClient.PollCondition() { @Override public boolean shouldBlock() { return false; } }); EasyMock.verify(mockNetworkClient); }
public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
@Test public void blockWhenPollConditionNotSatisfied() { long timeout = 4000L; NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class); ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, 100, 1000); EasyMock.expect(mockNetworkClient.inFlightRequestCount()).andReturn(1); EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(timeout), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList()); EasyMock.replay(mockNetworkClient); consumerClient.poll(timeout, time.milliseconds(), new ConsumerNetworkClient.PollCondition() { @Override public boolean shouldBlock() { return true; } }); EasyMock.verify(mockNetworkClient); }
public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
@Test public void blockOnlyForRetryBackoffIfNoInflightRequests() { long retryBackoffMs = 100L; NetworkClient mockNetworkClient = EasyMock.mock(NetworkClient.class); ConsumerNetworkClient consumerClient = new ConsumerNetworkClient(mockNetworkClient, metadata, time, retryBackoffMs, 1000L); EasyMock.expect(mockNetworkClient.inFlightRequestCount()).andReturn(0); EasyMock.expect(mockNetworkClient.poll(EasyMock.eq(retryBackoffMs), EasyMock.anyLong())).andReturn(Collections.<ClientResponse>emptyList()); EasyMock.replay(mockNetworkClient); consumerClient.poll(Long.MAX_VALUE, time.milliseconds(), new ConsumerNetworkClient.PollCondition() { @Override public boolean shouldBlock() { return true; } }); EasyMock.verify(mockNetworkClient); }
public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
ConsumerNetworkClient implements Closeable { public void poll(RequestFuture<?> future) { while (!future.isDone()) poll(MAX_POLL_TIMEOUT_MS, time.milliseconds(), future); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
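The three poll tests above (doNotBlockIfPollConditionIsSatisfied, blockWhenPollConditionNotSatisfied, blockOnlyForRetryBackoffIfNoInflightRequests) pin down how the effective block time is chosen: do not block at all when the condition is already satisfied, honor the requested timeout when requests are in flight, and fall back to the retry backoff when nothing is outstanding. A small sketch of that timeout selection, with made-up names and under the assumption that this is the only logic involved:

public class PollTimeoutSketch {
    interface PollCondition {
        boolean shouldBlock();
    }

    // Choose how long the underlying client may block on this poll call.
    static long effectivePollTimeout(long requestedTimeoutMs, int inFlightRequests,
                                     long retryBackoffMs, PollCondition condition) {
        if (!condition.shouldBlock())
            return 0L;                          // condition already satisfied: do not block
        if (inFlightRequests > 0)
            return requestedTimeoutMs;          // responses may arrive: honor the caller's timeout
        return Math.min(requestedTimeoutMs, retryBackoffMs); // nothing outstanding: only back off briefly
    }

    public static void main(String[] args) {
        PollCondition satisfied = () -> false;
        PollCondition unsatisfied = () -> true;
        System.out.println(effectivePollTimeout(Long.MAX_VALUE, 0, 100L, satisfied));   // 0
        System.out.println(effectivePollTimeout(4000L, 1, 100L, unsatisfied));          // 4000
        System.out.println(effectivePollTimeout(Long.MAX_VALUE, 0, 100L, unsatisfied)); // 100
    }
}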
@Test public void wakeup() { RequestFuture<ClientResponse> future = consumerClient.send(node, heartbeat()); consumerClient.wakeup(); try { consumerClient.poll(0); fail(); } catch (WakeupException e) { } client.respond(heartbeatResponse(Errors.NONE)); consumerClient.poll(future); assertTrue(future.isDone()); }
public void wakeup() { log.trace("Received user wakeup"); this.wakeup.set(true); this.client.wakeup(); }
ConsumerNetworkClient implements Closeable { public void wakeup() { log.trace("Received user wakeup"); this.wakeup.set(true); this.client.wakeup(); } }
ConsumerNetworkClient implements Closeable { public void wakeup() { log.trace("Received user wakeup"); this.wakeup.set(true); this.client.wakeup(); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); }
ConsumerNetworkClient implements Closeable { public void wakeup() { log.trace("Received user wakeup"); this.wakeup.set(true); this.client.wakeup(); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
ConsumerNetworkClient implements Closeable { public void wakeup() { log.trace("Received user wakeup"); this.wakeup.set(true); this.client.wakeup(); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
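The wakeup test above exercises the flag-plus-exception mechanism that lets another thread break a blocking poll without losing the pending request. A pure-JDK sketch of that mechanism; the class and its nested exception are invented here (the real consumer throws org.apache.kafka.common.errors.WakeupException):

import java.util.concurrent.atomic.AtomicBoolean;

public class WakeupSketch {
    static class WakeupException extends RuntimeException { }

    private final AtomicBoolean wakeup = new AtomicBoolean(false);

    // Called from any thread to interrupt a blocking poll.
    public void wakeup() {
        wakeup.set(true);
    }

    // Called from the polling thread; throws once if a wakeup was requested.
    public void maybeTriggerWakeup() {
        if (wakeup.compareAndSet(true, false))
            throw new WakeupException();
    }

    public static void main(String[] args) {
        WakeupSketch client = new WakeupSketch();
        client.wakeup();
        try {
            client.maybeTriggerWakeup();   // first poll after wakeup() fails fast
        } catch (WakeupException e) {
            System.out.println("woken up");
        }
        client.maybeTriggerWakeup();       // flag was cleared, so the next poll proceeds
        System.out.println("poll continues");
    }
}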
@Test public void testAwaitForMetadataUpdateWithTimeout() { assertFalse(consumerClient.awaitMetadataUpdate(10L)); }
public void awaitMetadataUpdate() { awaitMetadataUpdate(Long.MAX_VALUE); }
ConsumerNetworkClient implements Closeable { public void awaitMetadataUpdate() { awaitMetadataUpdate(Long.MAX_VALUE); } }
ConsumerNetworkClient implements Closeable { public void awaitMetadataUpdate() { awaitMetadataUpdate(Long.MAX_VALUE); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); }
ConsumerNetworkClient implements Closeable { public void awaitMetadataUpdate() { awaitMetadataUpdate(Long.MAX_VALUE); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
ConsumerNetworkClient implements Closeable { public void awaitMetadataUpdate() { awaitMetadataUpdate(Long.MAX_VALUE); } ConsumerNetworkClient(KafkaClient client, Metadata metadata, Time time, long retryBackoffMs, long requestTimeoutMs); RequestFuture<ClientResponse> send(Node node, AbstractRequest.Builder<?> requestBuilder); synchronized Node leastLoadedNode(); synchronized boolean hasReadyNodes(); void awaitMetadataUpdate(); boolean awaitMetadataUpdate(long timeout); void ensureFreshMetadata(); void wakeup(); void poll(RequestFuture<?> future); boolean poll(RequestFuture<?> future, long timeout); void poll(long timeout); void poll(long timeout, long now, PollCondition pollCondition); void poll(long timeout, long now, PollCondition pollCondition, boolean disableWakeup); void pollNoWakeup(); boolean awaitPendingRequests(Node node, long timeoutMs); int pendingRequestCount(Node node); boolean hasPendingRequests(Node node); int pendingRequestCount(); boolean hasPendingRequests(); void failUnsentRequests(Node node, RuntimeException e); void maybeTriggerWakeup(); void disableWakeups(); @Override void close(); boolean connectionFailed(Node node); void tryConnect(Node node); }
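testAwaitForMetadataUpdateWithTimeout only checks that a bounded wait returns false when no update arrives within the timeout. A generic deadline-based wait of the same shape (names are illustrative, and sleeping stands in for driving network I/O):

import java.util.function.BooleanSupplier;

public class BoundedWaitSketch {
    // Poll a condition until it holds or the timeout elapses; return whether it held.
    static boolean awaitCondition(BooleanSupplier condition, long timeoutMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
            long remaining = deadline - System.currentTimeMillis();
            if (remaining <= 0)
                return false;                       // timed out without the condition becoming true
            Thread.sleep(Math.min(remaining, 10L)); // stand-in for polling the network client
        }
        return true;
    }

    public static void main(String[] args) throws InterruptedException {
        // No metadata update ever arrives, so a 10 ms wait reports failure.
        System.out.println(awaitCondition(() -> false, 10L)); // prints false
    }
}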
@Test public void testShouldHeartbeat() { heartbeat.sentHeartbeat(time.milliseconds()); time.sleep((long) ((float) interval * 1.1)); assertTrue(heartbeat.shouldHeartbeat(time.milliseconds())); }
public boolean shouldHeartbeat(long now) { return timeToNextHeartbeat(now) == 0; }
Heartbeat { public boolean shouldHeartbeat(long now) { return timeToNextHeartbeat(now) == 0; } }
Heartbeat { public boolean shouldHeartbeat(long now) { return timeToNextHeartbeat(now) == 0; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); }
Heartbeat { public boolean shouldHeartbeat(long now) { return timeToNextHeartbeat(now) == 0; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
Heartbeat { public boolean shouldHeartbeat(long now) { return timeToNextHeartbeat(now) == 0; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
@Test public void testTimeToNextHeartbeat() { heartbeat.sentHeartbeat(0); assertEquals(100, heartbeat.timeToNextHeartbeat(0)); assertEquals(0, heartbeat.timeToNextHeartbeat(100)); assertEquals(0, heartbeat.timeToNextHeartbeat(200)); }
public long timeToNextHeartbeat(long now) { long timeSinceLastHeartbeat = now - Math.max(lastHeartbeatSend, lastSessionReset); final long delayToNextHeartbeat; if (heartbeatFailed) delayToNextHeartbeat = retryBackoffMs; else delayToNextHeartbeat = heartbeatInterval; if (timeSinceLastHeartbeat > delayToNextHeartbeat) return 0; else return delayToNextHeartbeat - timeSinceLastHeartbeat; }
Heartbeat { public long timeToNextHeartbeat(long now) { long timeSinceLastHeartbeat = now - Math.max(lastHeartbeatSend, lastSessionReset); final long delayToNextHeartbeat; if (heartbeatFailed) delayToNextHeartbeat = retryBackoffMs; else delayToNextHeartbeat = heartbeatInterval; if (timeSinceLastHeartbeat > delayToNextHeartbeat) return 0; else return delayToNextHeartbeat - timeSinceLastHeartbeat; } }
Heartbeat { public long timeToNextHeartbeat(long now) { long timeSinceLastHeartbeat = now - Math.max(lastHeartbeatSend, lastSessionReset); final long delayToNextHeartbeat; if (heartbeatFailed) delayToNextHeartbeat = retryBackoffMs; else delayToNextHeartbeat = heartbeatInterval; if (timeSinceLastHeartbeat > delayToNextHeartbeat) return 0; else return delayToNextHeartbeat - timeSinceLastHeartbeat; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); }
Heartbeat { public long timeToNextHeartbeat(long now) { long timeSinceLastHeartbeat = now - Math.max(lastHeartbeatSend, lastSessionReset); final long delayToNextHeartbeat; if (heartbeatFailed) delayToNextHeartbeat = retryBackoffMs; else delayToNextHeartbeat = heartbeatInterval; if (timeSinceLastHeartbeat > delayToNextHeartbeat) return 0; else return delayToNextHeartbeat - timeSinceLastHeartbeat; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
Heartbeat { public long timeToNextHeartbeat(long now) { long timeSinceLastHeartbeat = now - Math.max(lastHeartbeatSend, lastSessionReset); final long delayToNextHeartbeat; if (heartbeatFailed) delayToNextHeartbeat = retryBackoffMs; else delayToNextHeartbeat = heartbeatInterval; if (timeSinceLastHeartbeat > delayToNextHeartbeat) return 0; else return delayToNextHeartbeat - timeSinceLastHeartbeat; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
@Test public void testSessionTimeoutExpired() { heartbeat.sentHeartbeat(time.milliseconds()); time.sleep(305); assertTrue(heartbeat.sessionTimeoutExpired(time.milliseconds())); }
public boolean sessionTimeoutExpired(long now) { return now - Math.max(lastSessionReset, lastHeartbeatReceive) > sessionTimeout; }
Heartbeat { public boolean sessionTimeoutExpired(long now) { return now - Math.max(lastSessionReset, lastHeartbeatReceive) > sessionTimeout; } }
Heartbeat { public boolean sessionTimeoutExpired(long now) { return now - Math.max(lastSessionReset, lastHeartbeatReceive) > sessionTimeout; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); }
Heartbeat { public boolean sessionTimeoutExpired(long now) { return now - Math.max(lastSessionReset, lastHeartbeatReceive) > sessionTimeout; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
Heartbeat { public boolean sessionTimeoutExpired(long now) { return now - Math.max(lastSessionReset, lastHeartbeatReceive) > sessionTimeout; } Heartbeat(long sessionTimeout, long heartbeatInterval, long maxPollInterval, long retryBackoffMs); void poll(long now); void sentHeartbeat(long now); void failHeartbeat(); void receiveHeartbeat(long now); boolean shouldHeartbeat(long now); long lastHeartbeatSend(); long timeToNextHeartbeat(long now); boolean sessionTimeoutExpired(long now); long interval(); void resetTimeouts(long now); boolean pollTimeoutExpired(long now); }
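The three Heartbeat tests above (shouldHeartbeat, timeToNextHeartbeat, sessionTimeoutExpired) all reduce to interval arithmetic over the last send/receive timestamps. The standalone restatement below mirrors their assertions with illustrative values implied by the tests (a 100 ms interval, a session timeout of about 300 ms); it deliberately omits the failed-heartbeat retry backoff and the lastSessionReset bookkeeping shown in the real timeToNextHeartbeat.

public class HeartbeatTimingSketch {
    static long timeToNextHeartbeat(long now, long lastSend, long intervalMs) {
        long elapsed = now - lastSend;
        return elapsed > intervalMs ? 0 : intervalMs - elapsed;
    }

    static boolean shouldHeartbeat(long now, long lastSend, long intervalMs) {
        return timeToNextHeartbeat(now, lastSend, intervalMs) == 0;
    }

    static boolean sessionTimeoutExpired(long now, long lastReceive, long sessionTimeoutMs) {
        return now - lastReceive > sessionTimeoutMs;
    }

    public static void main(String[] args) {
        long interval = 100L, sessionTimeout = 300L;
        // Mirrors testTimeToNextHeartbeat: heartbeat sent at t=0.
        System.out.println(timeToNextHeartbeat(0, 0, interval));    // 100
        System.out.println(timeToNextHeartbeat(100, 0, interval));  // 0
        System.out.println(timeToNextHeartbeat(200, 0, interval));  // 0
        // Mirrors testShouldHeartbeat: 110 ms after the last send.
        System.out.println(shouldHeartbeat(110, 0, interval));      // true
        // Mirrors testSessionTimeoutExpired: 305 ms without a response.
        System.out.println(sessionTimeoutExpired(305, 0, sessionTimeout)); // true
    }
}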
@Test public void timestampToConnectOptionalWithDefaultValue() { Schema schema = Timestamp.builder().optional().defaultValue(new java.util.Date(42)).schema(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1, \"optional\": true, \"default\": 42 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(new java.util.Date(42), schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
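The optional-with-default variant only changes the expected Connect schema. For reference, that expected schema can be built with the public Connect data API roughly as below; this sketch just constructs and inspects the schema object the test compares against, assumes connect-api on the classpath, and is not excerpted from the converter itself.

import java.util.Date;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Timestamp;

public class TimestampSchemaSketch {
    public static void main(String[] args) {
        // Optional timestamp schema whose default is the epoch-millis value 42,
        // matching the JSON schema's "optional": true and "default": 42 fields.
        Schema schema = Timestamp.builder()
                .optional()
                .defaultValue(new Date(42L))
                .schema();
        System.out.println(schema.isOptional());    // true
        System.out.println(schema.defaultValue());  // the java.util.Date for 42 ms
    }
}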
@Test public void testCoordinatorDiscoveryBackoff() { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.blackout(coordinatorNode, 50L); long initialTime = mockTime.milliseconds(); coordinator.ensureCoordinatorReady(); long endTime = mockTime.milliseconds(); assertTrue(endTime - initialTime >= RETRY_BACKOFF_MS); }
public synchronized void ensureCoordinatorReady() { ensureCoordinatorReady(0, Long.MAX_VALUE); }
AbstractCoordinator implements Closeable { public synchronized void ensureCoordinatorReady() { ensureCoordinatorReady(0, Long.MAX_VALUE); } }
AbstractCoordinator implements Closeable { public synchronized void ensureCoordinatorReady() { ensureCoordinatorReady(0, Long.MAX_VALUE); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public synchronized void ensureCoordinatorReady() { ensureCoordinatorReady(0, Long.MAX_VALUE); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public synchronized void ensureCoordinatorReady() { ensureCoordinatorReady(0, Long.MAX_VALUE); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
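testCoordinatorDiscoveryBackoff asserts that an unreachable coordinator is not re-contacted before the retry backoff elapses. The retry-with-backoff loop it exercises can be sketched generically as follows; the method names and the two-attempt failure pattern are invented for the example.

import java.util.function.BooleanSupplier;

public class RetryBackoffSketch {
    // Retry an attempt until it succeeds, sleeping for the backoff between failures.
    static long ensureReady(BooleanSupplier attempt, long retryBackoffMs) throws InterruptedException {
        long start = System.currentTimeMillis();
        while (!attempt.getAsBoolean())
            Thread.sleep(retryBackoffMs);          // wait out the backoff before retrying
        return System.currentTimeMillis() - start; // total time spent becoming ready
    }

    public static void main(String[] args) throws InterruptedException {
        long retryBackoffMs = 100L;
        int[] calls = {0};
        // First attempt fails (e.g. the discovered coordinator is blacked out), second succeeds.
        long elapsed = ensureReady(() -> ++calls[0] > 1, retryBackoffMs);
        System.out.println(elapsed >= retryBackoffMs); // true, matching the test's assertion
    }
}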
@Test public void testLookupCoordinator() throws Exception { mockClient.setNode(null); RequestFuture<Void> noBrokersAvailableFuture = coordinator.lookupCoordinator(); assertTrue("Failed future expected", noBrokersAvailableFuture.failed()); mockClient.setNode(node); RequestFuture<Void> future = coordinator.lookupCoordinator(); assertFalse("Request not sent", future.isDone()); assertTrue("New request sent while one is in progress", future == coordinator.lookupCoordinator()); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); assertTrue("New request not sent after previous completed", future != coordinator.lookupCoordinator()); }
protected synchronized RequestFuture<Void> lookupCoordinator() { if (findCoordinatorFuture == null) { Node node = this.client.leastLoadedNode(); if (node == null) { log.debug("No broker available to send GroupCoordinator request for group {}", groupId); return RequestFuture.noBrokersAvailable(); } else findCoordinatorFuture = sendGroupCoordinatorRequest(node); } return findCoordinatorFuture; }
AbstractCoordinator implements Closeable { protected synchronized RequestFuture<Void> lookupCoordinator() { if (findCoordinatorFuture == null) { Node node = this.client.leastLoadedNode(); if (node == null) { log.debug("No broker available to send GroupCoordinator request for group {}", groupId); return RequestFuture.noBrokersAvailable(); } else findCoordinatorFuture = sendGroupCoordinatorRequest(node); } return findCoordinatorFuture; } }
AbstractCoordinator implements Closeable { protected synchronized RequestFuture<Void> lookupCoordinator() { if (findCoordinatorFuture == null) { Node node = this.client.leastLoadedNode(); if (node == null) { log.debug("No broker available to send GroupCoordinator request for group {}", groupId); return RequestFuture.noBrokersAvailable(); } else findCoordinatorFuture = sendGroupCoordinatorRequest(node); } return findCoordinatorFuture; } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { protected synchronized RequestFuture<Void> lookupCoordinator() { if (findCoordinatorFuture == null) { Node node = this.client.leastLoadedNode(); if (node == null) { log.debug("No broker available to send GroupCoordinator request for group {}", groupId); return RequestFuture.noBrokersAvailable(); } else findCoordinatorFuture = sendGroupCoordinatorRequest(node); } return findCoordinatorFuture; } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { protected synchronized RequestFuture<Void> lookupCoordinator() { if (findCoordinatorFuture == null) { Node node = this.client.leastLoadedNode(); if (node == null) { log.debug("No broker available to send GroupCoordinator request for group {}", groupId); return RequestFuture.noBrokersAvailable(); } else findCoordinatorFuture = sendGroupCoordinatorRequest(node); } return findCoordinatorFuture; } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
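The lookupCoordinator entries above cache the in-flight FindCoordinator future so that concurrent callers share a single request: the test at the top of this group asserts that a second lookupCoordinator() call returns the same future while one is pending and a fresh one after it completes. A minimal, self-contained sketch of that caching idiom, using a plain CompletableFuture instead of Kafka's internal RequestFuture (the class, field, and method names here are illustrative, not Kafka's), might look like:

import java.util.concurrent.CompletableFuture;

// Simplified stand-in for the "reuse the pending lookup" pattern exercised above.
// Not Kafka's RequestFuture machinery; just the same caching idea with CompletableFuture.
public class CachedLookup {
    private CompletableFuture<Void> pendingLookup; // null when no lookup is in flight

    public synchronized CompletableFuture<Void> lookup() {
        if (pendingLookup == null) {
            pendingLookup = startLookup();
            // clear the cache once the request completes so the next call sends a new one
            pendingLookup.whenComplete((result, error) -> clear());
        }
        return pendingLookup;
    }

    private synchronized void clear() {
        pendingLookup = null;
    }

    private CompletableFuture<Void> startLookup() {
        // placeholder for actually sending a FindCoordinator request
        return new CompletableFuture<>();
    }
}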
@Test public void testWakeupAfterJoinGroupSent() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest && invocations == 1) throw new WakeupException(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
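The wakeup tests in this part of the section all follow the same shape: a WakeupException is raised while ensureActiveGroup() is joining the group, the caller catches it, and a second ensureActiveGroup() call finishes the rebalance without repeating onJoinPrepare. The same wakeup-then-retry (or wakeup-then-shutdown) handling applies to application code calling the public consumer API; a minimal sketch, assuming a consumer created elsewhere and a separate thread that calls wakeup() to stop it (the topic name is illustrative), is:

import java.util.Collections;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;

// Standard wakeup handling: another thread calls consumer.wakeup(), the blocking
// poll() throws WakeupException, and the consumer is closed cleanly.
public final class WakeupAwareLoop {
    public static void run(KafkaConsumer<String, String> consumer) {
        try {
            consumer.subscribe(Collections.singletonList("some-topic"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(100);
                records.forEach(record -> System.out.println(record.value()));
            }
        } catch (WakeupException e) {
            // expected on shutdown: wakeup() was called from another thread
        } finally {
            consumer.close();
        }
    }
}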
@Test public void testWakeupAfterJoinGroupSentExternalCompletion() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest && invocations == 1) throw new WakeupException(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); consumerClient.poll(0); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testWakeupAfterJoinGroupReceived() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest) consumerClient.wakeup(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testWakeupAfterJoinGroupReceivedExternalCompletion() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { boolean isJoinGroupRequest = body instanceof JoinGroupRequest; if (isJoinGroupRequest) consumerClient.wakeup(); return isJoinGroupRequest; } }, joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); consumerClient.poll(0); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testWakeupAfterSyncGroupSent() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isSyncGroupRequest = body instanceof SyncGroupRequest; if (isSyncGroupRequest && invocations == 1) throw new WakeupException(); return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testWakeupAfterSyncGroupSentExternalCompletion() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { private int invocations = 0; @Override public boolean matches(AbstractRequest body) { invocations++; boolean isSyncGroupRequest = body instanceof SyncGroupRequest; if (isSyncGroupRequest && invocations == 1) throw new WakeupException(); return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); consumerClient.poll(0); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testWakeupAfterSyncGroupReceived() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { boolean isSyncGroupRequest = body instanceof SyncGroupRequest; if (isSyncGroupRequest) consumerClient.wakeup(); return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testWakeupAfterSyncGroupReceivedExternalCompletion() throws Exception { mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, "memberId", "leaderId", Errors.NONE)); mockClient.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { boolean isSyncGroupRequest = body instanceof SyncGroupRequest; if (isSyncGroupRequest) consumerClient.wakeup(); return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); fail("Should have woken up from ensureActiveGroup()"); } catch (WakeupException e) { } assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); assertFalse(heartbeatReceived.get()); consumerClient.poll(0); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); awaitFirstHeartbeat(heartbeatReceived); }
public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
AbstractCoordinator implements Closeable { public void ensureActiveGroup() { ensureCoordinatorReady(); startHeartbeatThreadIfNeeded(); joinGroupIfNeeded(); } AbstractCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean leaveGroupOnClose); synchronized void ensureCoordinatorReady(); void ensureActiveGroup(); boolean coordinatorUnknown(); @Override final void close(); synchronized void maybeLeaveGroup(); }
@Test public void testJsonSchemaMetadataTranslation() { JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, true)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue()); converted = parse(converter.fromConnectData(TOPIC, Schema.OPTIONAL_BOOLEAN_SCHEMA, null)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"boolean\", \"optional\": true }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertTrue(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).isNull()); converted = parse(converter.fromConnectData(TOPIC, SchemaBuilder.bool().defaultValue(true).build(), true)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false, \"default\": true }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue()); converted = parse(converter.fromConnectData(TOPIC, SchemaBuilder.bool().required().name("bool").version(3).doc("the documentation").parameter("foo", "bar").build(), true)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false, \"name\": \"bool\", \"version\": 3, \"doc\": \"the documentation\", \"parameters\": { \"foo\": \"bar\" }}"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue()); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
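The JsonConverter entries above serialize a Connect value either as a bare JSON value or, when schemas are enabled, wrapped in an envelope whose two fields carry the schema and the payload (the test compares against JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME and ENVELOPE_PAYLOAD_FIELD_NAME; the sketch below assumes the usual "schema"/"payload" key names). A small Jackson sketch of what the enveloped form of a required boolean looks like, built by hand rather than through the converter, is:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;

// Hand-built version of the envelope a schema-enabled JSON converter would produce for
// fromConnectData(topic, Schema.BOOLEAN_SCHEMA, true). The "schema"/"payload" field
// names are an assumption for illustration.
public class EnvelopeSketch {
    public static void main(String[] args) throws Exception {
        JsonNodeFactory f = JsonNodeFactory.instance;

        ObjectNode schema = f.objectNode();
        schema.put("type", "boolean");
        schema.put("optional", false);

        ObjectNode envelope = f.objectNode();
        envelope.set("schema", schema);
        envelope.put("payload", true);

        // Prints: {"schema":{"type":"boolean","optional":false},"payload":true}
        System.out.println(new ObjectMapper().writeValueAsString(envelope));
    }
}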
@Test(expected = IllegalStateException.class) public void cantChangePositionForNonAssignedPartition() { state.position(tp0, 1); }
public void position(TopicPartition tp, long offset) { assignedState(tp).position(offset); }
SubscriptionState { public void position(TopicPartition tp, long offset) { assignedState(tp).position(offset); } }
SubscriptionState { public void position(TopicPartition tp, long offset) { assignedState(tp).position(offset); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); }
SubscriptionState { public void position(TopicPartition tp, long offset) { assignedState(tp).position(offset); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); void subscribe(Set<String> topics, ConsumerRebalanceListener listener); void subscribeFromPattern(Set<String> topics); void groupSubscribe(Collection<String> topics); void resetGroupSubscription(); void assignFromUser(Set<TopicPartition> partitions); void assignFromSubscribed(Collection<TopicPartition> assignments); void subscribe(Pattern pattern, ConsumerRebalanceListener listener); boolean hasPatternSubscription(); boolean hasNoSubscriptionOrUserAssignment(); void unsubscribe(); Pattern subscribedPattern(); Set<String> subscription(); Set<TopicPartition> pausedPartitions(); Set<String> groupSubscription(); void committed(TopicPartition tp, OffsetAndMetadata offset); OffsetAndMetadata committed(TopicPartition tp); void needRefreshCommits(); boolean refreshCommitsNeeded(); void commitsRefreshed(); void seek(TopicPartition tp, long offset); Set<TopicPartition> assignedPartitions(); List<TopicPartition> fetchablePartitions(); boolean partitionsAutoAssigned(); void position(TopicPartition tp, long offset); Long position(TopicPartition tp); Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel); void updateHighWatermark(TopicPartition tp, long highWatermark); void updateLastStableOffset(TopicPartition tp, long lastStableOffset); Map<TopicPartition, OffsetAndMetadata> allConsumed(); void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy); void needOffsetReset(TopicPartition partition); boolean hasDefaultOffsetResetPolicy(); boolean isOffsetResetNeeded(TopicPartition partition); OffsetResetStrategy resetStrategy(TopicPartition partition); boolean hasAllFetchPositions(Collection<TopicPartition> partitions); boolean hasAllFetchPositions(); Set<TopicPartition> missingFetchPositions(); boolean isAssigned(TopicPartition tp); boolean isPaused(TopicPartition tp); boolean isFetchable(TopicPartition tp); boolean hasValidPosition(TopicPartition tp); void pause(TopicPartition tp); void resume(TopicPartition tp); void movePartitionToEnd(TopicPartition tp); ConsumerRebalanceListener listener(); void addListener(Listener listener); void fireOnAssignment(Set<TopicPartition> assignment); }
SubscriptionState { public void position(TopicPartition tp, long offset) { assignedState(tp).position(offset); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); void subscribe(Set<String> topics, ConsumerRebalanceListener listener); void subscribeFromPattern(Set<String> topics); void groupSubscribe(Collection<String> topics); void resetGroupSubscription(); void assignFromUser(Set<TopicPartition> partitions); void assignFromSubscribed(Collection<TopicPartition> assignments); void subscribe(Pattern pattern, ConsumerRebalanceListener listener); boolean hasPatternSubscription(); boolean hasNoSubscriptionOrUserAssignment(); void unsubscribe(); Pattern subscribedPattern(); Set<String> subscription(); Set<TopicPartition> pausedPartitions(); Set<String> groupSubscription(); void committed(TopicPartition tp, OffsetAndMetadata offset); OffsetAndMetadata committed(TopicPartition tp); void needRefreshCommits(); boolean refreshCommitsNeeded(); void commitsRefreshed(); void seek(TopicPartition tp, long offset); Set<TopicPartition> assignedPartitions(); List<TopicPartition> fetchablePartitions(); boolean partitionsAutoAssigned(); void position(TopicPartition tp, long offset); Long position(TopicPartition tp); Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel); void updateHighWatermark(TopicPartition tp, long highWatermark); void updateLastStableOffset(TopicPartition tp, long lastStableOffset); Map<TopicPartition, OffsetAndMetadata> allConsumed(); void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy); void needOffsetReset(TopicPartition partition); boolean hasDefaultOffsetResetPolicy(); boolean isOffsetResetNeeded(TopicPartition partition); OffsetResetStrategy resetStrategy(TopicPartition partition); boolean hasAllFetchPositions(Collection<TopicPartition> partitions); boolean hasAllFetchPositions(); Set<TopicPartition> missingFetchPositions(); boolean isAssigned(TopicPartition tp); boolean isPaused(TopicPartition tp); boolean isFetchable(TopicPartition tp); boolean hasValidPosition(TopicPartition tp); void pause(TopicPartition tp); void resume(TopicPartition tp); void movePartitionToEnd(TopicPartition tp); ConsumerRebalanceListener listener(); void addListener(Listener listener); void fireOnAssignment(Set<TopicPartition> assignment); }
@Test(expected = IllegalStateException.class) public void cantSubscribeTopicAndPattern() { state.subscribe(singleton(topic), rebalanceListener); state.subscribe(Pattern.compile(".*"), rebalanceListener); }
public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); }
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } }
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); }
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); void subscribe(Set<String> topics, ConsumerRebalanceListener listener); void subscribeFromPattern(Set<String> topics); void groupSubscribe(Collection<String> topics); void resetGroupSubscription(); void assignFromUser(Set<TopicPartition> partitions); void assignFromSubscribed(Collection<TopicPartition> assignments); void subscribe(Pattern pattern, ConsumerRebalanceListener listener); boolean hasPatternSubscription(); boolean hasNoSubscriptionOrUserAssignment(); void unsubscribe(); Pattern subscribedPattern(); Set<String> subscription(); Set<TopicPartition> pausedPartitions(); Set<String> groupSubscription(); void committed(TopicPartition tp, OffsetAndMetadata offset); OffsetAndMetadata committed(TopicPartition tp); void needRefreshCommits(); boolean refreshCommitsNeeded(); void commitsRefreshed(); void seek(TopicPartition tp, long offset); Set<TopicPartition> assignedPartitions(); List<TopicPartition> fetchablePartitions(); boolean partitionsAutoAssigned(); void position(TopicPartition tp, long offset); Long position(TopicPartition tp); Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel); void updateHighWatermark(TopicPartition tp, long highWatermark); void updateLastStableOffset(TopicPartition tp, long lastStableOffset); Map<TopicPartition, OffsetAndMetadata> allConsumed(); void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy); void needOffsetReset(TopicPartition partition); boolean hasDefaultOffsetResetPolicy(); boolean isOffsetResetNeeded(TopicPartition partition); OffsetResetStrategy resetStrategy(TopicPartition partition); boolean hasAllFetchPositions(Collection<TopicPartition> partitions); boolean hasAllFetchPositions(); Set<TopicPartition> missingFetchPositions(); boolean isAssigned(TopicPartition tp); boolean isPaused(TopicPartition tp); boolean isFetchable(TopicPartition tp); boolean hasValidPosition(TopicPartition tp); void pause(TopicPartition tp); void resume(TopicPartition tp); void movePartitionToEnd(TopicPartition tp); ConsumerRebalanceListener listener(); void addListener(Listener listener); void fireOnAssignment(Set<TopicPartition> assignment); }
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); void subscribe(Set<String> topics, ConsumerRebalanceListener listener); void subscribeFromPattern(Set<String> topics); void groupSubscribe(Collection<String> topics); void resetGroupSubscription(); void assignFromUser(Set<TopicPartition> partitions); void assignFromSubscribed(Collection<TopicPartition> assignments); void subscribe(Pattern pattern, ConsumerRebalanceListener listener); boolean hasPatternSubscription(); boolean hasNoSubscriptionOrUserAssignment(); void unsubscribe(); Pattern subscribedPattern(); Set<String> subscription(); Set<TopicPartition> pausedPartitions(); Set<String> groupSubscription(); void committed(TopicPartition tp, OffsetAndMetadata offset); OffsetAndMetadata committed(TopicPartition tp); void needRefreshCommits(); boolean refreshCommitsNeeded(); void commitsRefreshed(); void seek(TopicPartition tp, long offset); Set<TopicPartition> assignedPartitions(); List<TopicPartition> fetchablePartitions(); boolean partitionsAutoAssigned(); void position(TopicPartition tp, long offset); Long position(TopicPartition tp); Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel); void updateHighWatermark(TopicPartition tp, long highWatermark); void updateLastStableOffset(TopicPartition tp, long lastStableOffset); Map<TopicPartition, OffsetAndMetadata> allConsumed(); void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy); void needOffsetReset(TopicPartition partition); boolean hasDefaultOffsetResetPolicy(); boolean isOffsetResetNeeded(TopicPartition partition); OffsetResetStrategy resetStrategy(TopicPartition partition); boolean hasAllFetchPositions(Collection<TopicPartition> partitions); boolean hasAllFetchPositions(); Set<TopicPartition> missingFetchPositions(); boolean isAssigned(TopicPartition tp); boolean isPaused(TopicPartition tp); boolean isFetchable(TopicPartition tp); boolean hasValidPosition(TopicPartition tp); void pause(TopicPartition tp); void resume(TopicPartition tp); void movePartitionToEnd(TopicPartition tp); ConsumerRebalanceListener listener(); void addListener(Listener listener); void fireOnAssignment(Set<TopicPartition> assignment); }
@Test(expected = IllegalStateException.class) public void cantSubscribePatternAndTopic() { state.subscribe(Pattern.compile(".*"), rebalanceListener); state.subscribe(singleton(topic), rebalanceListener); }
public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); }
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } }
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); }
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); void subscribe(Set<String> topics, ConsumerRebalanceListener listener); void subscribeFromPattern(Set<String> topics); void groupSubscribe(Collection<String> topics); void resetGroupSubscription(); void assignFromUser(Set<TopicPartition> partitions); void assignFromSubscribed(Collection<TopicPartition> assignments); void subscribe(Pattern pattern, ConsumerRebalanceListener listener); boolean hasPatternSubscription(); boolean hasNoSubscriptionOrUserAssignment(); void unsubscribe(); Pattern subscribedPattern(); Set<String> subscription(); Set<TopicPartition> pausedPartitions(); Set<String> groupSubscription(); void committed(TopicPartition tp, OffsetAndMetadata offset); OffsetAndMetadata committed(TopicPartition tp); void needRefreshCommits(); boolean refreshCommitsNeeded(); void commitsRefreshed(); void seek(TopicPartition tp, long offset); Set<TopicPartition> assignedPartitions(); List<TopicPartition> fetchablePartitions(); boolean partitionsAutoAssigned(); void position(TopicPartition tp, long offset); Long position(TopicPartition tp); Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel); void updateHighWatermark(TopicPartition tp, long highWatermark); void updateLastStableOffset(TopicPartition tp, long lastStableOffset); Map<TopicPartition, OffsetAndMetadata> allConsumed(); void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy); void needOffsetReset(TopicPartition partition); boolean hasDefaultOffsetResetPolicy(); boolean isOffsetResetNeeded(TopicPartition partition); OffsetResetStrategy resetStrategy(TopicPartition partition); boolean hasAllFetchPositions(Collection<TopicPartition> partitions); boolean hasAllFetchPositions(); Set<TopicPartition> missingFetchPositions(); boolean isAssigned(TopicPartition tp); boolean isPaused(TopicPartition tp); boolean isFetchable(TopicPartition tp); boolean hasValidPosition(TopicPartition tp); void pause(TopicPartition tp); void resume(TopicPartition tp); void movePartitionToEnd(TopicPartition tp); ConsumerRebalanceListener listener(); void addListener(Listener listener); void fireOnAssignment(Set<TopicPartition> assignment); }
SubscriptionState { public void subscribe(Set<String> topics, ConsumerRebalanceListener listener) { if (listener == null) throw new IllegalArgumentException("RebalanceListener cannot be null"); setSubscriptionType(SubscriptionType.AUTO_TOPICS); this.listener = listener; changeSubscription(topics); } SubscriptionState(OffsetResetStrategy defaultResetStrategy); void subscribe(Set<String> topics, ConsumerRebalanceListener listener); void subscribeFromPattern(Set<String> topics); void groupSubscribe(Collection<String> topics); void resetGroupSubscription(); void assignFromUser(Set<TopicPartition> partitions); void assignFromSubscribed(Collection<TopicPartition> assignments); void subscribe(Pattern pattern, ConsumerRebalanceListener listener); boolean hasPatternSubscription(); boolean hasNoSubscriptionOrUserAssignment(); void unsubscribe(); Pattern subscribedPattern(); Set<String> subscription(); Set<TopicPartition> pausedPartitions(); Set<String> groupSubscription(); void committed(TopicPartition tp, OffsetAndMetadata offset); OffsetAndMetadata committed(TopicPartition tp); void needRefreshCommits(); boolean refreshCommitsNeeded(); void commitsRefreshed(); void seek(TopicPartition tp, long offset); Set<TopicPartition> assignedPartitions(); List<TopicPartition> fetchablePartitions(); boolean partitionsAutoAssigned(); void position(TopicPartition tp, long offset); Long position(TopicPartition tp); Long partitionLag(TopicPartition tp, IsolationLevel isolationLevel); void updateHighWatermark(TopicPartition tp, long highWatermark); void updateLastStableOffset(TopicPartition tp, long lastStableOffset); Map<TopicPartition, OffsetAndMetadata> allConsumed(); void needOffsetReset(TopicPartition partition, OffsetResetStrategy offsetResetStrategy); void needOffsetReset(TopicPartition partition); boolean hasDefaultOffsetResetPolicy(); boolean isOffsetResetNeeded(TopicPartition partition); OffsetResetStrategy resetStrategy(TopicPartition partition); boolean hasAllFetchPositions(Collection<TopicPartition> partitions); boolean hasAllFetchPositions(); Set<TopicPartition> missingFetchPositions(); boolean isAssigned(TopicPartition tp); boolean isPaused(TopicPartition tp); boolean isFetchable(TopicPartition tp); boolean hasValidPosition(TopicPartition tp); void pause(TopicPartition tp); void resume(TopicPartition tp); void movePartitionToEnd(TopicPartition tp); ConsumerRebalanceListener listener(); void addListener(Listener listener); void fireOnAssignment(Set<TopicPartition> assignment); }
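Both of the preceding tests pin down the same invariant: SubscriptionState allows only one subscription type at a time, so switching from explicit topics to a pattern (or the reverse) without unsubscribing fails with IllegalStateException. A stripped-down, standalone sketch of that guard, mirroring the asserted behavior (the enum values and message text here are illustrative, not copied from Kafka), is:

// Minimal illustration of a "subscription types are mutually exclusive" guard.
public class SubscriptionTypeGuard {
    enum SubscriptionType { NONE, AUTO_TOPICS, AUTO_PATTERN, USER_ASSIGNED }

    private SubscriptionType subscriptionType = SubscriptionType.NONE;

    void setSubscriptionType(SubscriptionType type) {
        if (subscriptionType == SubscriptionType.NONE)
            subscriptionType = type;
        else if (subscriptionType != type)
            throw new IllegalStateException(
                    "Subscription to topics, partitions and pattern are mutually exclusive");
    }

    void unsubscribe() {
        subscriptionType = SubscriptionType.NONE; // resets the guard so a new type can be chosen
    }
}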
@Test public void testOneConsumerNoTopic() { String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(Collections.<String>emptyList()))); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertTrue(assignment.get(consumerId).isEmpty()); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
@Test public void testOneConsumerNonexistentTopic() { String topic = "topic"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topics(topic)))); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertTrue(assignment.get(consumerId).isEmpty()); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
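The assign method in the record above splits each topic's partitions into contiguous ranges: every consumer gets numPartitions / numConsumers partitions, and the first numPartitions % numConsumers consumers (in lexicographic member-id order) receive one extra. Below is a minimal standalone sketch of that start/length arithmetic, assuming nothing beyond plain Java; it does not use the Kafka classes from the record, and the counts in main are illustrative only.

    import java.util.ArrayList;
    import java.util.List;

    public class RangeMathSketch {
        // For each of numConsumers consumers, return the partition indices it would
        // receive under range assignment of numPartitions partitions of one topic.
        static List<List<Integer>> ranges(int numPartitions, int numConsumers) {
            int perConsumer = numPartitions / numConsumers;   // floor share for everyone
            int withExtra = numPartitions % numConsumers;     // first 'withExtra' consumers get one more
            List<List<Integer>> result = new ArrayList<>();
            for (int i = 0; i < numConsumers; i++) {
                int start = perConsumer * i + Math.min(i, withExtra);
                int length = perConsumer + (i + 1 > withExtra ? 0 : 1);
                List<Integer> slice = new ArrayList<>();
                for (int p = start; p < start + length; p++)
                    slice.add(p);
                result.add(slice);
            }
            return result;
        }

        public static void main(String[] args) {
            // 7 partitions over 3 consumers -> [[0, 1, 2], [3, 4], [5, 6]]
            System.out.println(ranges(7, 3));
        }
    }

This is the same formula the focal method applies per topic before taking subList(start, start + length) of the partition list.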
@Test public void testOneConsumerOneTopic() { String topic = "topic"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 3); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topics(topic)))); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertAssignment(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
@Test public void testOnlyAssignsPartitionsFromSubscribedTopics() { String topic = "topic"; String otherTopic = "other"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 3); partitionsPerTopic.put(otherTopic, 3); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topics(topic)))); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertAssignment(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
@Test public void testOneConsumerMultipleTopics() { String topic1 = "topic1"; String topic2 = "topic2"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 1); partitionsPerTopic.put(topic2, 2); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topics(topic1, topic2)))); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertAssignment(partitions(tp(topic1, 0), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumerId)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
@Test public void testTwoConsumersOneTopicOnePartition() { String topic = "topic"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 1); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic))); consumers.put(consumer2, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertAssignment(partitions(tp(topic, 0)), assignment.get(consumer1)); assertAssignment(Collections.<TopicPartition>emptyList(), assignment.get(consumer2)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
@Test public void testTwoConsumersOneTopicTwoPartitions() { String topic = "topic"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 2); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic))); consumers.put(consumer2, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertAssignment(partitions(tp(topic, 0)), assignment.get(consumer1)); assertAssignment(partitions(tp(topic, 1)), assignment.get(consumer2)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
@Test public void testCacheSchemaToConnectConversion() { Cache<JsonNode, Schema> cache = Whitebox.getInternalState(converter, "toConnectSchemaCache"); assertEquals(0, cache.size()); converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": true }".getBytes()); assertEquals(1, cache.size()); converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": true }".getBytes()); assertEquals(1, cache.size()); converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": true }, \"payload\": true }".getBytes()); assertEquals(2, cache.size()); converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": false }, \"payload\": true }".getBytes()); assertEquals(3, cache.size()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
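The JsonConverter record above requires, when schemas.enable is true, a two-field JSON envelope with "schema" and "payload" keys, which is exactly the shape fed in by testCacheSchemaToConnectConversion. A small sketch of building such an envelope with Jackson follows; the class and variable names here are illustrative and not part of the corpus, and only standard Jackson calls are used.

    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.node.JsonNodeFactory;
    import com.fasterxml.jackson.databind.node.ObjectNode;

    public class EnvelopeSketch {
        public static void main(String[] args) throws Exception {
            // Build { "schema": { "type": "boolean" }, "payload": true }
            ObjectNode schema = JsonNodeFactory.instance.objectNode();
            schema.put("type", "boolean");

            ObjectNode envelope = JsonNodeFactory.instance.objectNode();
            envelope.set("schema", schema);
            envelope.put("payload", true);

            byte[] serialized = new ObjectMapper().writeValueAsBytes(envelope);
            System.out.println(new String(serialized, "UTF-8"));
            // With schemas.enable=true the converter parses this envelope;
            // with schemas.enable=false it wraps raw JSON in a null-schema envelope itself.
        }
    }

Note that toConnectData rejects any envelope that is not an object with exactly these two fields when schemas are enabled, which is why the cached-schema test varies only the "schema" member between calls.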
@Test public void testMultipleConsumersMixedTopics() { String topic1 = "topic1"; String topic2 = "topic2"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; String consumer3 = "consumer3"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 3); partitionsPerTopic.put(topic2, 2); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic1))); consumers.put(consumer2, new Subscription(topics(topic1, topic2))); consumers.put(consumer3, new Subscription(topics(topic1))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertAssignment(partitions(tp(topic1, 0)), assignment.get(consumer1)); assertAssignment(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer2)); assertAssignment(partitions(tp(topic1, 2)), assignment.get(consumer3)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
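In the testMultipleConsumersMixedTopics record above, the assignment is computed per topic over only the consumers subscribed to that topic: topic1 is split across all three consumers, while topic2's two partitions go entirely to consumer2. The sketch below mirrors that consumersPerTopic grouping step in plain Java, assuming the same subscription layout as the test; it does not reuse the Kafka classes, and all names are illustrative.

    import java.util.*;

    public class PerTopicSketch {
        public static void main(String[] args) {
            // consumer -> subscribed topics (the mixed-topics scenario from the test)
            Map<String, List<String>> subscriptions = new HashMap<>();
            subscriptions.put("consumer1", Arrays.asList("topic1"));
            subscriptions.put("consumer2", Arrays.asList("topic1", "topic2"));
            subscriptions.put("consumer3", Arrays.asList("topic1"));

            // Invert to topic -> sorted consumers, as the assignor does before slicing ranges.
            Map<String, List<String>> consumersPerTopic = new TreeMap<>();
            for (Map.Entry<String, List<String>> e : subscriptions.entrySet())
                for (String topic : e.getValue())
                    consumersPerTopic.computeIfAbsent(topic, t -> new ArrayList<>()).add(e.getKey());
            for (List<String> consumers : consumersPerTopic.values())
                Collections.sort(consumers);

            // topic1 -> [consumer1, consumer2, consumer3]; topic2 -> [consumer2]
            System.out.println(consumersPerTopic);
        }
    }

Because each topic is ranged independently over its own subscriber list, consumer2 ends up with one partition of topic1 plus both partitions of topic2, matching the test's assertions.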
@Test public void testTwoConsumersTwoTopicsSixPartitions() { String topic1 = "topic1"; String topic2 = "topic2"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 3); partitionsPerTopic.put(topic2, 3); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic1, topic2))); consumers.put(consumer2, new Subscription(topics(topic1, topic2))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertAssignment(partitions(tp(topic1, 0), tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer1)); assertAssignment(partitions(tp(topic1, 2), tp(topic2, 2)), assignment.get(consumer2)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
RangeAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<String>> consumersPerTopic = consumersPerTopic(subscriptions); Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); for (Map.Entry<String, List<String>> topicEntry : consumersPerTopic.entrySet()) { String topic = topicEntry.getKey(); List<String> consumersForTopic = topicEntry.getValue(); Integer numPartitionsForTopic = partitionsPerTopic.get(topic); if (numPartitionsForTopic == null) continue; Collections.sort(consumersForTopic); int numPartitionsPerConsumer = numPartitionsForTopic / consumersForTopic.size(); int consumersWithExtraPartition = numPartitionsForTopic % consumersForTopic.size(); List<TopicPartition> partitions = AbstractPartitionAssignor.partitions(topic, numPartitionsForTopic); for (int i = 0, n = consumersForTopic.size(); i < n; i++) { int start = numPartitionsPerConsumer * i + Math.min(i, consumersWithExtraPartition); int length = numPartitionsPerConsumer + (i + 1 > consumersWithExtraPartition ? 0 : 1); assignment.get(consumersForTopic.get(i)).addAll(partitions.subList(start, start + length)); } } return assignment; } @Override String name(); @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); }
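The range assignment above sorts the consumers of each topic and hands each one a contiguous block of partitions; the first numPartitionsForTopic % numConsumers consumers receive one extra partition. The following is a minimal, self-contained Java sketch of that arithmetic (class and variable names are illustrative only, not part of the Kafka code); it reproduces the split checked by testTwoConsumersTwoTopicsSixPartitions for one 3-partition topic and two consumers.

// Illustrative sketch of the range-assignment arithmetic; not part of the dataset.
import java.util.ArrayList;
import java.util.List;

public class RangeMathDemo {
    public static void main(String[] args) {
        int numPartitions = 3;                               // partitions of one topic
        int numConsumers = 2;                                // size of the sorted consumer list
        int perConsumer = numPartitions / numConsumers;      // 1
        int withExtra = numPartitions % numConsumers;        // 1 consumer gets one extra partition
        for (int i = 0; i < numConsumers; i++) {
            int start = perConsumer * i + Math.min(i, withExtra);
            int length = perConsumer + (i + 1 > withExtra ? 0 : 1);
            List<Integer> owned = new ArrayList<>();
            for (int p = start; p < start + length; p++)
                owned.add(p);
            System.out.println("consumer" + (i + 1) + " -> partitions " + owned);
            // prints consumer1 -> [0, 1] and consumer2 -> [2], matching the test above
        }
    }
}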
@Test public void testOsDefaultSocketBufferSizes() throws Exception { Map<String, Object> config = new HashMap<>(); config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); config.put(ConsumerConfig.SEND_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE); config.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, Selectable.USE_DEFAULT_BUFFER_SIZE); KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>( config, new ByteArrayDeserializer(), new ByteArrayDeserializer()); consumer.close(); }
@Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
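testOsDefaultSocketBufferSizes checks that the send/receive buffer settings accept the "leave it to the OS" sentinel (Selectable.USE_DEFAULT_BUFFER_SIZE, assumed below to resolve to -1) and that such a consumer can be constructed and closed. A hedged usage sketch follows; the bootstrap address is a placeholder and no broker needs to be running for construction.

// Illustrative sketch only: keep the OS socket buffer defaults, then release the client
// with the no-arg close() shown above (which delegates to close(DEFAULT_CLOSE_TIMEOUT_MS, MILLISECONDS)).
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OsDefaultBuffersDemo {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder address
        // -1 is assumed to be the value of Selectable.USE_DEFAULT_BUFFER_SIZE,
        // i.e. let the operating system size the socket buffers.
        props.put(ConsumerConfig.SEND_BUFFER_CONFIG, -1);
        props.put(ConsumerConfig.RECEIVE_BUFFER_CONFIG, -1);
        KafkaConsumer<byte[], byte[]> consumer =
                new KafkaConsumer<>(props, new ByteArrayDeserializer(), new ByteArrayDeserializer());
        try {
            // subscribe/poll as needed
        } finally {
            consumer.close(); // always release sockets and metrics
        }
    }
}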
@Test public void testSubscription() { KafkaConsumer<byte[], byte[]> consumer = newConsumer(); consumer.subscribe(singletonList(topic)); assertEquals(singleton(topic), consumer.subscription()); assertTrue(consumer.assignment().isEmpty()); consumer.subscribe(Collections.<String>emptyList()); assertTrue(consumer.subscription().isEmpty()); assertTrue(consumer.assignment().isEmpty()); consumer.assign(singletonList(tp0)); assertTrue(consumer.subscription().isEmpty()); assertEquals(singleton(tp0), consumer.assignment()); consumer.unsubscribe(); assertTrue(consumer.subscription().isEmpty()); assertTrue(consumer.assignment().isEmpty()); consumer.close(); }
public Set<String> subscription() { acquire(); try { return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription())); } finally { release(); } }
KafkaConsumer implements Consumer<K, V> { public Set<String> subscription() { acquire(); try { return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription())); } finally { release(); } } }
KafkaConsumer implements Consumer<K, V> { public Set<String> subscription() { acquire(); try { return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription())); } finally { release(); } } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); }
KafkaConsumer implements Consumer<K, V> { public Set<String> subscription() { acquire(); try { return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription())); } finally { release(); } } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
KafkaConsumer implements Consumer<K, V> { public Set<String> subscription() { acquire(); try { return Collections.unmodifiableSet(new HashSet<>(this.subscriptions.subscription())); } finally { release(); } } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
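subscription() wraps its read in acquire()/release() and returns an unmodifiable copy, so callers cannot mutate internal state and concurrent use is rejected rather than silently tolerated. The sketch below is a simplified stand-in for that acquire()/release() idiom, not the actual KafkaConsumer code; it omits the re-entrancy counter the real client keeps.

// Simplified illustration of a non-blocking ownership check that fails fast on concurrent callers.
import java.util.concurrent.ConcurrentModificationException;
import java.util.concurrent.atomic.AtomicLong;

public class LightLockDemo {
    private static final long NO_OWNER = -1L;
    private final AtomicLong owner = new AtomicLong(NO_OWNER);

    void acquire() {
        long self = Thread.currentThread().getId();
        if (owner.get() != self && !owner.compareAndSet(NO_OWNER, self))
            throw new ConcurrentModificationException("consumer is not safe for multi-threaded access");
    }

    void release() {
        owner.set(NO_OWNER); // simplification: no nesting count, so nested acquire/release pairs release early
    }
}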
@Test public void testInterceptorConstructorClose() throws Exception { try { Properties props = new Properties(); props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName()); KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>( props, new StringDeserializer(), new StringDeserializer()); assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); assertEquals(0, MockConsumerInterceptor.CLOSE_COUNT.get()); consumer.close(); assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get()); Assert.assertNull(MockConsumerInterceptor.CLUSTER_META.get()); } finally { MockConsumerInterceptor.resetCounters(); } }
@Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
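testInterceptorConstructorClose relies on interceptors listed under interceptor.classes being instantiated and configured by the consumer constructor and closed exactly once by close(). The counting interceptor below is a hypothetical stand-in for MockConsumerInterceptor, shown only to illustrate that contract; the counter placement is an assumption, not the harness class itself.

// Hypothetical interceptor that records how often it is configured and closed.
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.kafka.clients.consumer.ConsumerInterceptor;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class CountingInterceptor implements ConsumerInterceptor<String, String> {
    public static final AtomicInteger INIT_COUNT = new AtomicInteger();
    public static final AtomicInteger CLOSE_COUNT = new AtomicInteger();

    @Override
    public void configure(Map<String, ?> configs) {
        INIT_COUNT.incrementAndGet();   // invoked once per instance created from interceptor.classes
    }

    @Override
    public ConsumerRecords<String, String> onConsume(ConsumerRecords<String, String> records) {
        return records;                  // pass records through unchanged
    }

    @Override
    public void onCommit(Map<TopicPartition, OffsetAndMetadata> offsets) {
        // no-op for this sketch
    }

    @Override
    public void close() {
        CLOSE_COUNT.incrementAndGet();   // the consumer's close() should drive this exactly once
    }
}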
@Test public void testPause() { KafkaConsumer<byte[], byte[]> consumer = newConsumer(); consumer.assign(singletonList(tp0)); assertEquals(singleton(tp0), consumer.assignment()); assertTrue(consumer.paused().isEmpty()); consumer.pause(singleton(tp0)); assertEquals(singleton(tp0), consumer.paused()); consumer.resume(singleton(tp0)); assertTrue(consumer.paused().isEmpty()); consumer.unsubscribe(); assertTrue(consumer.paused().isEmpty()); consumer.close(); }
@Override public void pause(Collection<TopicPartition> partitions) { acquire(); try { for (TopicPartition partition: partitions) { log.debug("Pausing partition {}", partition); subscriptions.pause(partition); } } finally { release(); } }
KafkaConsumer implements Consumer<K, V> { @Override public void pause(Collection<TopicPartition> partitions) { acquire(); try { for (TopicPartition partition: partitions) { log.debug("Pausing partition {}", partition); subscriptions.pause(partition); } } finally { release(); } } }
KafkaConsumer implements Consumer<K, V> { @Override public void pause(Collection<TopicPartition> partitions) { acquire(); try { for (TopicPartition partition: partitions) { log.debug("Pausing partition {}", partition); subscriptions.pause(partition); } } finally { release(); } } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); }
KafkaConsumer implements Consumer<K, V> { @Override public void pause(Collection<TopicPartition> partitions) { acquire(); try { for (TopicPartition partition: partitions) { log.debug("Pausing partition {}", partition); subscriptions.pause(partition); } } finally { release(); } } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
KafkaConsumer implements Consumer<K, V> { @Override public void pause(Collection<TopicPartition> partitions) { acquire(); try { for (TopicPartition partition: partitions) { log.debug("Pausing partition {}", partition); subscriptions.pause(partition); } } finally { release(); } } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
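As the pause() body above shows, pause() and resume() only flip flags in SubscriptionState: paused partitions are skipped by fetches while the consumer keeps polling and stays in its group, and no request is sent to the broker. A small usage sketch (method and variable names are placeholders):

// Illustrative backpressure helper: pause a partition while downstream work catches up, then resume.
import java.util.Collections;
import org.apache.kafka.clients.consumer.Consumer;
import org.apache.kafka.common.TopicPartition;

public class PauseResumeDemo {
    static void throttle(Consumer<byte[], byte[]> consumer, TopicPartition tp, boolean backlogged) {
        if (backlogged)
            consumer.pause(Collections.singleton(tp));   // poll() stops returning records for tp
        else if (consumer.paused().contains(tp))
            consumer.resume(Collections.singleton(tp));  // fetching for tp resumes on the next poll()
    }
}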
@Test public void closeShouldBeIdempotent() { KafkaConsumer<byte[], byte[]> consumer = newConsumer(); consumer.close(); consumer.close(); }
@Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
KafkaConsumer implements Consumer<K, V> { @Override public void close() { close(DEFAULT_CLOSE_TIMEOUT_MS, TimeUnit.MILLISECONDS); } KafkaConsumer(Map<String, Object> configs); KafkaConsumer(Map<String, Object> configs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(Properties properties); KafkaConsumer(Properties properties, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); @SuppressWarnings("unchecked") private KafkaConsumer(ConsumerConfig config, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer); KafkaConsumer(String clientId, ConsumerCoordinator coordinator, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Fetcher<K, V> fetcher, ConsumerInterceptors<K, V> interceptors, Time time, ConsumerNetworkClient client, Metrics metrics, SubscriptionState subscriptions, Metadata metadata, long retryBackoffMs, long requestTimeoutMs); Set<TopicPartition> assignment(); Set<String> subscription(); @Override // subscribe to the given topics, partitions are assigned to the consumer automatically void subscribe(Collection<String> topics, ConsumerRebalanceListener listener); @Override void subscribe(Collection<String> topics); @Override void subscribe(Pattern pattern, ConsumerRebalanceListener listener); void unsubscribe(); @Override // manually assign the given topic partitions to consume, mutually exclusive with subscribe void assign(Collection<TopicPartition> partitions); @Override // fetches messages from the brokers ConsumerRecords<K, V> poll(long timeout); @Override void commitSync(); @Override void commitSync(final Map<TopicPartition, OffsetAndMetadata> offsets); @Override void commitAsync(); @Override void commitAsync(OffsetCommitCallback callback); @Override void commitAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, OffsetCommitCallback callback); @Override // sets the position from which the consumer starts consuming void seek(TopicPartition partition, long offset); void seekToBeginning(Collection<TopicPartition> partitions); void seekToEnd(Collection<TopicPartition> partitions); long position(TopicPartition partition); @Override OffsetAndMetadata committed(TopicPartition partition); @Override Map<MetricName, ? extends Metric> metrics(); @Override List<PartitionInfo> partitionsFor(String topic); @Override Map<String, List<PartitionInfo>> listTopics(); @Override void pause(Collection<TopicPartition> partitions); @Override // resume the consumer void resume(Collection<TopicPartition> partitions); @Override // pause the consumer Set<TopicPartition> paused(); @Override Map<TopicPartition, OffsetAndTimestamp> offsetsForTimes(Map<TopicPartition, Long> timestampsToSearch); @Override Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions); @Override Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions); @Override void close(); void close(long timeout, TimeUnit timeUnit); @Override void wakeup(); }
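closeShouldBeIdempotent() documents that calling close() repeatedly is safe. The sketch below shows the close-once idiom as an illustrative pattern only, not the consumer's actual implementation, which hides the guard inside close(long, TimeUnit).

// Minimal close-once guard: the first call releases resources, later calls are no-ops.
import java.util.concurrent.atomic.AtomicBoolean;

public class IdempotentCloseDemo implements AutoCloseable {
    private final AtomicBoolean closed = new AtomicBoolean(false);

    @Override
    public void close() {
        if (!closed.compareAndSet(false, true))
            return;                       // second and subsequent calls do nothing
        // release sockets, metrics, interceptors, deserializers here
    }
}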
@Test @SuppressWarnings("deprecation") public void testNullChecksumInConstructor() { String key = "key"; String value = "value"; long timestamp = 242341324L; ConsumerRecord<String, String> record = new ConsumerRecord<>("topic", 0, 23L, timestamp, TimestampType.CREATE_TIME, null, key.length(), value.length(), key, value, new RecordHeaders()); assertEquals(DefaultRecord.computePartialChecksum(timestamp, key.length(), value.length()), record.checksum()); }
@Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; }
ConsumerRecord { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } }
ConsumerRecord { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } ConsumerRecord(String topic, int partition, long offset, K key, V value); ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, long checksum, int serializedKeySize, int serializedValueSize, K key, V value); ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, Long checksum, int serializedKeySize, int serializedValueSize, K key, V value, Headers headers); }
ConsumerRecord { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } ConsumerRecord(String topic, int partition, long offset, K key, V value); ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, long checksum, int serializedKeySize, int serializedValueSize, K key, V value); ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, Long checksum, int serializedKeySize, int serializedValueSize, K key, V value, Headers headers); String topic(); int partition(); Headers headers(); K key(); V value(); long offset(); long timestamp(); TimestampType timestampType(); @Deprecated long checksum(); int serializedKeySize(); int serializedValueSize(); @Override String toString(); }
ConsumerRecord { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } ConsumerRecord(String topic, int partition, long offset, K key, V value); ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, long checksum, int serializedKeySize, int serializedValueSize, K key, V value); ConsumerRecord(String topic, int partition, long offset, long timestamp, TimestampType timestampType, Long checksum, int serializedKeySize, int serializedValueSize, K key, V value, Headers headers); String topic(); int partition(); Headers headers(); K key(); V value(); long offset(); long timestamp(); TimestampType timestampType(); @Deprecated long checksum(); int serializedKeySize(); int serializedValueSize(); @Override String toString(); static final long NO_TIMESTAMP; static final int NULL_SIZE; static final int NULL_CHECKSUM; }
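The deprecated checksum() above memoizes a value derived from the timestamp and serialized sizes in a nullable boxed field, computing it on first access when the record was created without an explicit checksum. A generic sketch of that lazy-cache idiom follows; the formula inside is a stand-in, not DefaultRecord.computePartialChecksum, and all names are illustrative.

// Lazy, single-field cache of a derived value (not thread-safe, mirroring the original's scope).
public class LazyChecksumDemo {
    private final long timestamp;
    private final int keySize;
    private final int valueSize;
    private Long cached;                  // null until the first checksum() call

    public LazyChecksumDemo(long timestamp, int keySize, int valueSize) {
        this.timestamp = timestamp;
        this.keySize = keySize;
        this.valueSize = valueSize;
    }

    public long checksum() {
        if (cached == null)               // compute once, reuse on later calls
            cached = 31L * timestamp + 31L * keySize + valueSize; // stand-in calculation
        return cached;
    }
}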
@Test public void testOneConsumerNoTopic() { String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(Collections.<String>emptyList()))); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertTrue(assignment.get(consumerId).isEmpty()); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
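The round-robin assignor walks all subscribed partitions in a fixed order and hands each one to the next eligible member of the sorted member list, wrapping around via CircularIterator. The sketch below is a simplified stand-in for that cyclic hand-out: it ignores the per-member subscription check performed by the while loop above, and all names and topic strings are illustrative.

// Simplified round-robin distribution over a sorted member list.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.TreeSet;

public class RoundRobinDemo {
    public static void main(String[] args) {
        List<String> members = new ArrayList<>(new TreeSet<>(Arrays.asList("consumer2", "consumer1")));
        List<String> partitions = Arrays.asList("t-0", "t-1", "t-2", "t-3", "t-4");
        Map<String, List<String>> assignment = new HashMap<>();
        for (String m : members)
            assignment.put(m, new ArrayList<>());
        for (int i = 0; i < partitions.size(); i++)          // walk partitions in order and
            assignment.get(members.get(i % members.size())).add(partitions.get(i)); // hand them out cyclically
        System.out.println(assignment);
        // expected contents: consumer1 -> [t-0, t-2, t-4], consumer2 -> [t-1, t-3]
    }
}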
@Test public void testOneConsumerNonexistentTopic() { String topic = "topic"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topics(topic)))); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertTrue(assignment.get(consumerId).isEmpty()); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
@Test public void booleanToJson() { JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, true)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"boolean\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue()); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
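The JsonConverter rows above can also be exercised outside the test harness. The following is a minimal standalone sketch, not part of the dataset, assuming the Kafka Connect JSON converter (org.apache.kafka:connect-json) is on the classpath; the class name, topic name, and main method are illustrative only. With "schemas.enable" set to true, fromConnectData wraps the payload in the schema/payload envelope that booleanToJson asserts on.

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.json.JsonConverter;

public class JsonConverterSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // false = configure as a value converter; "schemas.enable" turns on the envelope
        converter.configure(Collections.singletonMap("schemas.enable", "true"), false);
        byte[] serialized = converter.fromConnectData("my-topic", Schema.BOOLEAN_SCHEMA, true);
        // Expected shape: {"schema":{"type":"boolean","optional":false},"payload":true}
        System.out.println(new String(serialized, StandardCharsets.UTF_8));
    }
}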
@Test public void testOneConsumerOneTopic() { String topic = "topic"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 3); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topics(topic)))); assertEquals(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
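RoundRobinAssignor can be driven directly, mirroring testOneConsumerOneTopic above. The sketch below is not part of the dataset and assumes the 0.10/0.11-era clients API shown in these rows, where assign takes per-topic partition counts and Subscription comes from org.apache.kafka.clients.consumer.internals.PartitionAssignor; member and topic names are illustrative.

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.RoundRobinAssignor;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription;
import org.apache.kafka.common.TopicPartition;

public class RoundRobinSketch {
    public static void main(String[] args) {
        Map<String, Integer> partitionsPerTopic = new HashMap<>();
        partitionsPerTopic.put("topic", 3);
        // One member subscribed to one three-partition topic receives every partition.
        Map<String, Subscription> subscriptions =
                Collections.singletonMap("consumer", new Subscription(Collections.singletonList("topic")));
        Map<String, List<TopicPartition>> assignment =
                new RoundRobinAssignor().assign(partitionsPerTopic, subscriptions);
        System.out.println(assignment); // {consumer=[topic-0, topic-1, topic-2]}
    }
}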
@Test public void testOnlyAssignsPartitionsFromSubscribedTopics() { String topic = "topic"; String otherTopic = "other"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 3); partitionsPerTopic.put(otherTopic, 3); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topics(topic)))); assertEquals(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
@Test public void testOneConsumerMultipleTopics() { String topic1 = "topic1"; String topic2 = "topic2"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 1); partitionsPerTopic.put(topic2, 2); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, Collections.singletonMap(consumerId, new Subscription(topics(topic1, topic2)))); assertEquals(partitions(tp(topic1, 0), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumerId)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
@Test public void testTwoConsumersOneTopicOnePartition() { String topic = "topic"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 1); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic))); consumers.put(consumer2, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertEquals(partitions(tp(topic, 0)), assignment.get(consumer1)); assertEquals(Collections.<TopicPartition>emptyList(), assignment.get(consumer2)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
@Test public void testTwoConsumersOneTopicTwoPartitions() { String topic = "topic"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 2); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic))); consumers.put(consumer2, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertEquals(partitions(tp(topic, 0)), assignment.get(consumer1)); assertEquals(partitions(tp(topic, 1)), assignment.get(consumer2)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
@Test public void testMultipleConsumersMixedTopics() { String topic1 = "topic1"; String topic2 = "topic2"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; String consumer3 = "consumer3"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 3); partitionsPerTopic.put(topic2, 2); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic1))); consumers.put(consumer2, new Subscription(topics(topic1, topic2))); consumers.put(consumer3, new Subscription(topics(topic1))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertEquals(partitions(tp(topic1, 0)), assignment.get(consumer1)); assertEquals(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer2)); assertEquals(partitions(tp(topic1, 2)), assignment.get(consumer3)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
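The mixed-subscription case above is where the while loop in assign matters: allPartitionsSorted yields topic1-0, topic1-1, topic1-2, topic2-0, topic2-1, and the circular iterator skips any member whose subscription does not contain the current topic, so consumer2 alone receives the topic2 partitions. Below is a hedged reproduction outside the test harness, under the same API assumptions as the earlier RoundRobinAssignor sketch.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.RoundRobinAssignor;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription;
import org.apache.kafka.common.TopicPartition;

public class MixedTopicsSketch {
    public static void main(String[] args) {
        Map<String, Integer> partitionsPerTopic = new HashMap<>();
        partitionsPerTopic.put("topic1", 3);
        partitionsPerTopic.put("topic2", 2);
        Map<String, Subscription> consumers = new HashMap<>();
        consumers.put("consumer1", new Subscription(Arrays.asList("topic1")));
        consumers.put("consumer2", new Subscription(Arrays.asList("topic1", "topic2")));
        consumers.put("consumer3", new Subscription(Arrays.asList("topic1")));
        Map<String, List<TopicPartition>> assignment =
                new RoundRobinAssignor().assign(partitionsPerTopic, consumers);
        // consumer1=[topic1-0], consumer2=[topic1-1, topic2-0, topic2-1], consumer3=[topic1-2]
        System.out.println(assignment);
    }
}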
@Test public void testTwoConsumersTwoTopicsSixPartitions() { String topic1 = "topic1"; String topic2 = "topic2"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 3); partitionsPerTopic.put(topic2, 3); Map<String, Subscription> consumers = new HashMap<>(); consumers.put(consumer1, new Subscription(topics(topic1, topic2))); consumers.put(consumer2, new Subscription(topics(topic1, topic2))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, consumers); assertEquals(partitions(tp(topic1, 0), tp(topic1, 2), tp(topic2, 1)), assignment.get(consumer1)); assertEquals(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 2)), assignment.get(consumer2)); }
@Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
RoundRobinAssignor extends AbstractPartitionAssignor { @Override public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> assignment = new HashMap<>(); for (String memberId : subscriptions.keySet()) assignment.put(memberId, new ArrayList<TopicPartition>()); CircularIterator<String> assigner = new CircularIterator<>(Utils.sorted(subscriptions.keySet())); for (TopicPartition partition : allPartitionsSorted(partitionsPerTopic, subscriptions)) { final String topic = partition.topic(); while (!subscriptions.get(assigner.peek()).topics().contains(topic)) assigner.next(); assignment.get(assigner.next()).add(partition); } return assignment; } @Override Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); List<TopicPartition> allPartitionsSorted(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override String name(); }
@Test public void testMetadata() throws Exception { long time = 0; metadata.update(Cluster.empty(), Collections.<String>emptySet(), time); assertFalse("No update needed.", metadata.timeToNextUpdate(time) == 0); metadata.requestUpdate(); assertFalse("Still no update needed due to backoff", metadata.timeToNextUpdate(time) == 0); time += refreshBackoffMs; assertTrue("Update needed now that backoff time expired", metadata.timeToNextUpdate(time) == 0); String topic = "my-topic"; Thread t1 = asyncFetch(topic, 500); Thread t2 = asyncFetch(topic, 500); assertTrue("Awaiting update", t1.isAlive()); assertTrue("Awaiting update", t2.isAlive()); while (t1.isAlive() || t2.isAlive()) { if (metadata.timeToNextUpdate(time) == 0) { metadata.update(TestUtils.singletonCluster(topic, 1), Collections.<String>emptySet(), time); time += refreshBackoffMs; } Thread.sleep(1); } t1.join(); t2.join(); assertFalse("No update needed.", metadata.timeToNextUpdate(time) == 0); time += metadataExpireMs; assertTrue("Update needed due to stale metadata.", metadata.timeToNextUpdate(time) == 0); }
public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation) { this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners()); }
Metadata { public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation) { this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners()); } }
Metadata { public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation) { this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners()); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); }
Metadata { public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation) { this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners()); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); }
Metadata { public Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation) { this(refreshBackoffMs, metadataExpireMs, allowAutoTopicCreation, false, new ClusterResourceListeners()); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); static final long TOPIC_EXPIRY_MS; }
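The refresh/backoff lifecycle that testMetadata exercises can be sketched with the three-argument constructor shown in these rows. Metadata is an internal client class (org.apache.kafka.clients.Metadata), so the snippet below is illustrative only and not part of the dataset; the numbers are chosen to make the formula visible.

import java.util.Collections;
import org.apache.kafka.clients.Metadata;
import org.apache.kafka.common.Cluster;

public class MetadataLifecycleSketch {
    public static void main(String[] args) {
        long refreshBackoffMs = 100;
        long metadataExpireMs = 1000;
        Metadata metadata = new Metadata(refreshBackoffMs, metadataExpireMs, true);
        long now = 0;
        metadata.update(Cluster.empty(), Collections.<String>emptySet(), now);
        System.out.println(metadata.timeToNextUpdate(now)); // 1000: fresh metadata, wait for expiry
        metadata.requestUpdate();
        System.out.println(metadata.timeToNextUpdate(now)); // 100: requested, but backoff still applies
        System.out.println(metadata.timeToNextUpdate(now + refreshBackoffMs)); // 0: backoff elapsed, update due
    }
}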
@Test public void testTimeToNextUpdate() { checkTimeToNextUpdate(100, 1000); checkTimeToNextUpdate(1000, 100); checkTimeToNextUpdate(0, 0); checkTimeToNextUpdate(0, 100); checkTimeToNextUpdate(100, 0); }
public synchronized long timeToNextUpdate(long nowMs) { long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0); long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs; return Math.max(timeToExpire, timeToAllowUpdate); }
Metadata { public synchronized long timeToNextUpdate(long nowMs) { long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0); long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs; return Math.max(timeToExpire, timeToAllowUpdate); } }
Metadata { public synchronized long timeToNextUpdate(long nowMs) { long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0); long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs; return Math.max(timeToExpire, timeToAllowUpdate); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); }
Metadata { public synchronized long timeToNextUpdate(long nowMs) { long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0); long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs; return Math.max(timeToExpire, timeToAllowUpdate); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); }
Metadata { public synchronized long timeToNextUpdate(long nowMs) { long timeToExpire = needUpdate ? 0 : Math.max(this.lastSuccessfulRefreshMs + this.metadataExpireMs - nowMs, 0); long timeToAllowUpdate = this.lastRefreshMs + this.refreshBackoffMs - nowMs; return Math.max(timeToExpire, timeToAllowUpdate); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); static final long TOPIC_EXPIRY_MS; }
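timeToNextUpdate is just a max over two clocks, which is what checkTimeToNextUpdate sweeps in testTimeToNextUpdate. Below is a dependency-free restatement of the formula from the rows above (a sketch, not Kafka API; the class and parameter names are illustrative).

final class TimeToNextUpdateFormula {
    static long timeToNextUpdate(boolean needUpdate, long lastSuccessfulRefreshMs,
                                 long lastRefreshMs, long refreshBackoffMs,
                                 long metadataExpireMs, long nowMs) {
        // Clock 1: time until the cached metadata expires (0 if an update was requested).
        long timeToExpire = needUpdate ? 0 : Math.max(lastSuccessfulRefreshMs + metadataExpireMs - nowMs, 0);
        // Clock 2: time until the refresh backoff since the last attempt has elapsed.
        long timeToAllowUpdate = lastRefreshMs + refreshBackoffMs - nowMs;
        return Math.max(timeToExpire, timeToAllowUpdate);
    }

    public static void main(String[] args) {
        System.out.println(timeToNextUpdate(false, 0, 0, 100, 1000, 0)); // 1000: expiry dominates
        System.out.println(timeToNextUpdate(true, 0, 0, 100, 1000, 50)); // 50: only the backoff remains
    }
}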
@Test public void testFailedUpdate() { long time = 100; metadata.update(Cluster.empty(), Collections.<String>emptySet(), time); assertEquals(100, metadata.timeToNextUpdate(1000)); metadata.failedUpdate(1100); assertEquals(100, metadata.timeToNextUpdate(1100)); assertEquals(100, metadata.lastSuccessfulUpdate()); metadata.needMetadataForAllTopics(true); metadata.update(Cluster.empty(), Collections.<String>emptySet(), time); assertEquals(100, metadata.timeToNextUpdate(1000)); }
public synchronized void failedUpdate(long now) { this.lastRefreshMs = now; }
Metadata { public synchronized void failedUpdate(long now) { this.lastRefreshMs = now; } }
Metadata { public synchronized void failedUpdate(long now) { this.lastRefreshMs = now; } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); }
Metadata { public synchronized void failedUpdate(long now) { this.lastRefreshMs = now; } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); }
Metadata { public synchronized void failedUpdate(long now) { this.lastRefreshMs = now; } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); static final long TOPIC_EXPIRY_MS; }
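As testFailedUpdate asserts, failedUpdate only restarts the backoff clock (lastRefreshMs) while lastSuccessfulUpdate is left untouched, so the expiry clock keeps counting from the last good refresh. A hedged continuation of the earlier Metadata sketch, under the same assumptions:

import java.util.Collections;
import org.apache.kafka.clients.Metadata;
import org.apache.kafka.common.Cluster;

public class FailedUpdateSketch {
    public static void main(String[] args) {
        Metadata metadata = new Metadata(100, 1000, true);
        metadata.update(Cluster.empty(), Collections.<String>emptySet(), 100);
        metadata.failedUpdate(1100);
        System.out.println(metadata.timeToNextUpdate(1100)); // 100: backoff measured from the failed attempt
        System.out.println(metadata.lastSuccessfulUpdate()); // 100: unchanged by the failure
    }
}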
@Test public void byteToJson() { JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT8_SCHEMA, (byte) 12)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"int8\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(12, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).intValue()); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
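For contrast with the envelope form asserted in byteToJson, disabling "schemas.enable" makes fromConnectData emit the bare payload. Same classpath assumptions and illustrative names as the earlier JsonConverter sketch.

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.json.JsonConverter;

public class SchemalessJsonSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);
        byte[] serialized = converter.fromConnectData("my-topic", Schema.INT8_SCHEMA, (byte) 12);
        System.out.println(new String(serialized, StandardCharsets.UTF_8)); // 12
    }
}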
@Test public void testClusterListenerGetsNotifiedOfUpdate() { long time = 0; MockClusterResourceListener mockClusterListener = new MockClusterResourceListener(); ClusterResourceListeners listeners = new ClusterResourceListeners(); listeners.maybeAdd(mockClusterListener); metadata = new Metadata(refreshBackoffMs, metadataExpireMs, true, false, listeners); String hostName = "www.example.com"; Cluster cluster = Cluster.bootstrap(Arrays.asList(new InetSocketAddress(hostName, 9002))); metadata.update(cluster, Collections.<String>emptySet(), time); assertFalse("ClusterResourceListener should not be called when metadata is updated with bootstrap Cluster", MockClusterResourceListener.IS_ON_UPDATE_CALLED.get()); metadata.update(new Cluster( "dummy", Arrays.asList(new Node(0, "host1", 1000)), Arrays.asList( new PartitionInfo("topic", 0, null, null, null), new PartitionInfo("topic1", 0, null, null, null)), Collections.<String>emptySet(), Collections.<String>emptySet()), Collections.<String>emptySet(), 100); assertEquals("MockClusterResourceListener did not get cluster metadata correctly", "dummy", mockClusterListener.clusterResource().clusterId()); assertTrue("MockClusterResourceListener should be called when metadata is updated with non-bootstrap Cluster", MockClusterResourceListener.IS_ON_UPDATE_CALLED.get()); }
public synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now) { Objects.requireNonNull(cluster, "cluster should not be null"); this.needUpdate = false; this.lastRefreshMs = now; this.lastSuccessfulRefreshMs = now; this.version += 1; if (topicExpiryEnabled) { for (Iterator<Map.Entry<String, Long>> it = topics.entrySet().iterator(); it.hasNext(); ) { Map.Entry<String, Long> entry = it.next(); long expireMs = entry.getValue(); if (expireMs == TOPIC_EXPIRY_NEEDS_UPDATE) entry.setValue(now + TOPIC_EXPIRY_MS); else if (expireMs <= now) { it.remove(); log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", entry.getKey(), expireMs, now); } } } for (Listener listener: listeners) listener.onMetadataUpdate(cluster, unavailableTopics); String previousClusterId = cluster.clusterResource().clusterId(); if (this.needMetadataForAllTopics) { this.needUpdate = false; this.cluster = getClusterForCurrentTopics(cluster); } else { this.cluster = cluster; } if (!cluster.isBootstrapConfigured()) { String clusterId = cluster.clusterResource().clusterId(); if (clusterId == null ? previousClusterId != null : !clusterId.equals(previousClusterId)) log.info("Cluster ID: {}", cluster.clusterResource().clusterId()); clusterResourceListeners.onUpdate(cluster.clusterResource()); } notifyAll(); log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); }
Metadata { public synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now) { Objects.requireNonNull(cluster, "cluster should not be null"); this.needUpdate = false; this.lastRefreshMs = now; this.lastSuccessfulRefreshMs = now; this.version += 1; if (topicExpiryEnabled) { for (Iterator<Map.Entry<String, Long>> it = topics.entrySet().iterator(); it.hasNext(); ) { Map.Entry<String, Long> entry = it.next(); long expireMs = entry.getValue(); if (expireMs == TOPIC_EXPIRY_NEEDS_UPDATE) entry.setValue(now + TOPIC_EXPIRY_MS); else if (expireMs <= now) { it.remove(); log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", entry.getKey(), expireMs, now); } } } for (Listener listener: listeners) listener.onMetadataUpdate(cluster, unavailableTopics); String previousClusterId = cluster.clusterResource().clusterId(); if (this.needMetadataForAllTopics) { this.needUpdate = false; this.cluster = getClusterForCurrentTopics(cluster); } else { this.cluster = cluster; } if (!cluster.isBootstrapConfigured()) { String clusterId = cluster.clusterResource().clusterId(); if (clusterId == null ? previousClusterId != null : !clusterId.equals(previousClusterId)) log.info("Cluster ID: {}", cluster.clusterResource().clusterId()); clusterResourceListeners.onUpdate(cluster.clusterResource()); } notifyAll(); log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); } }
Metadata { public synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now) { Objects.requireNonNull(cluster, "cluster should not be null"); this.needUpdate = false; this.lastRefreshMs = now; this.lastSuccessfulRefreshMs = now; this.version += 1; if (topicExpiryEnabled) { for (Iterator<Map.Entry<String, Long>> it = topics.entrySet().iterator(); it.hasNext(); ) { Map.Entry<String, Long> entry = it.next(); long expireMs = entry.getValue(); if (expireMs == TOPIC_EXPIRY_NEEDS_UPDATE) entry.setValue(now + TOPIC_EXPIRY_MS); else if (expireMs <= now) { it.remove(); log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", entry.getKey(), expireMs, now); } } } for (Listener listener: listeners) listener.onMetadataUpdate(cluster, unavailableTopics); String previousClusterId = cluster.clusterResource().clusterId(); if (this.needMetadataForAllTopics) { this.needUpdate = false; this.cluster = getClusterForCurrentTopics(cluster); } else { this.cluster = cluster; } if (!cluster.isBootstrapConfigured()) { String clusterId = cluster.clusterResource().clusterId(); if (clusterId == null ? previousClusterId != null : !clusterId.equals(previousClusterId)) log.info("Cluster ID: {}", cluster.clusterResource().clusterId()); clusterResourceListeners.onUpdate(cluster.clusterResource()); } notifyAll(); log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); }
Metadata { public synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now) { Objects.requireNonNull(cluster, "cluster should not be null"); this.needUpdate = false; this.lastRefreshMs = now; this.lastSuccessfulRefreshMs = now; this.version += 1; if (topicExpiryEnabled) { for (Iterator<Map.Entry<String, Long>> it = topics.entrySet().iterator(); it.hasNext(); ) { Map.Entry<String, Long> entry = it.next(); long expireMs = entry.getValue(); if (expireMs == TOPIC_EXPIRY_NEEDS_UPDATE) entry.setValue(now + TOPIC_EXPIRY_MS); else if (expireMs <= now) { it.remove(); log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", entry.getKey(), expireMs, now); } } } for (Listener listener: listeners) listener.onMetadataUpdate(cluster, unavailableTopics); String previousClusterId = cluster.clusterResource().clusterId(); if (this.needMetadataForAllTopics) { this.needUpdate = false; this.cluster = getClusterForCurrentTopics(cluster); } else { this.cluster = cluster; } if (!cluster.isBootstrapConfigured()) { String clusterId = cluster.clusterResource().clusterId(); if (clusterId == null ? previousClusterId != null : !clusterId.equals(previousClusterId)) log.info("Cluster ID: {}", cluster.clusterResource().clusterId()); clusterResourceListeners.onUpdate(cluster.clusterResource()); } notifyAll(); log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); }
Metadata { public synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now) { Objects.requireNonNull(cluster, "cluster should not be null"); this.needUpdate = false; this.lastRefreshMs = now; this.lastSuccessfulRefreshMs = now; this.version += 1; if (topicExpiryEnabled) { for (Iterator<Map.Entry<String, Long>> it = topics.entrySet().iterator(); it.hasNext(); ) { Map.Entry<String, Long> entry = it.next(); long expireMs = entry.getValue(); if (expireMs == TOPIC_EXPIRY_NEEDS_UPDATE) entry.setValue(now + TOPIC_EXPIRY_MS); else if (expireMs <= now) { it.remove(); log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", entry.getKey(), expireMs, now); } } } for (Listener listener: listeners) listener.onMetadataUpdate(cluster, unavailableTopics); String previousClusterId = cluster.clusterResource().clusterId(); if (this.needMetadataForAllTopics) { this.needUpdate = false; this.cluster = getClusterForCurrentTopics(cluster); } else { this.cluster = cluster; } if (!cluster.isBootstrapConfigured()) { String clusterId = cluster.clusterResource().clusterId(); if (clusterId == null ? previousClusterId != null : !clusterId.equals(previousClusterId)) log.info("Cluster ID: {}", cluster.clusterResource().clusterId()); clusterResourceListeners.onUpdate(cluster.clusterResource()); } notifyAll(); log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation, boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); static final long TOPIC_EXPIRY_MS; }
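For reference, a minimal sketch of the kind of listener exercised above, assuming only the public org.apache.kafka.common.ClusterResourceListener interface; Metadata.update() invokes onUpdate() only once a non-bootstrap cluster is seen, which is what the test asserts.

import org.apache.kafka.common.ClusterResource;
import org.apache.kafka.common.ClusterResourceListener;

// Hypothetical listener: remembers the most recent cluster id it was notified about.
public class ClusterIdCapturingListener implements ClusterResourceListener {
    private volatile String lastClusterId;

    @Override
    public void onUpdate(ClusterResource clusterResource) {
        this.lastClusterId = clusterResource.clusterId();
    }

    public String lastClusterId() {
        return lastClusterId;
    }
}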
@Test @SuppressWarnings("deprecation") public void testNullChecksum() { long timestamp = 2340234L; int keySize = 3; int valueSize = 5; RecordMetadata metadata = new RecordMetadata(new TopicPartition("foo", 0), 15L, 3L, timestamp, null, keySize, valueSize); assertEquals(DefaultRecord.computePartialChecksum(timestamp, keySize, valueSize), metadata.checksum()); }
@Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; }
RecordMetadata { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } }
RecordMetadata { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, Long checksum, int serializedKeySize, int serializedValueSize); @Deprecated RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, long checksum, int serializedKeySize, int serializedValueSize); }
RecordMetadata { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, Long checksum, int serializedKeySize, int serializedValueSize); @Deprecated RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, long checksum, int serializedKeySize, int serializedValueSize); long offset(); long timestamp(); @Deprecated long checksum(); int serializedKeySize(); int serializedValueSize(); String topic(); int partition(); @Override String toString(); }
RecordMetadata { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, Long checksum, int serializedKeySize, int serializedValueSize); @Deprecated RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp, long checksum, int serializedKeySize, int serializedValueSize); long offset(); long timestamp(); @Deprecated long checksum(); int serializedKeySize(); int serializedValueSize(); String topic(); int partition(); @Override String toString(); static final int UNKNOWN_PARTITION; }
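As a usage note, a hedged sketch of reading RecordMetadata from a producer Callback; it sticks to the non-deprecated accessors, since checksum() above is deprecated and is lazily derived from the timestamp and serialized sizes when the broker did not return one (the behaviour the test checks).

// Illustrative callback only: logs where an acknowledged record landed, avoiding the deprecated checksum().
Callback loggingCallback = new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null)
            exception.printStackTrace();
        else
            System.out.printf("acked %s-%d@%d%n", metadata.topic(), metadata.partition(), metadata.offset());
    }
};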
@Test public void shouldThrowOnInitTransactionIfProducerAlreadyInitializedForTransactions() { producer.initTransactions(); try { producer.initTransactions(); fail("Should have thrown as producer is already initialized"); } catch (IllegalStateException e) { } }
@Override public void initTransactions() { verifyProducerState(); if (this.transactionInitialized) { throw new IllegalStateException("MockProducer has already been initialized for transactions."); } this.transactionInitialized = true; }
MockProducer implements Producer<K, V> { @Override public void initTransactions() { verifyProducerState(); if (this.transactionInitialized) { throw new IllegalStateException("MockProducer has already been initialized for transactions."); } this.transactionInitialized = true; } }
MockProducer implements Producer<K, V> { @Override public void initTransactions() { verifyProducerState(); if (this.transactionInitialized) { throw new IllegalStateException("MockProducer has already been initialized for transactions."); } this.transactionInitialized = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); }
MockProducer implements Producer<K, V> { @Override public void initTransactions() { verifyProducerState(); if (this.transactionInitialized) { throw new IllegalStateException("MockProducer has already been initialized for transactions."); } this.transactionInitialized = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
MockProducer implements Producer<K, V> { @Override public void initTransactions() { verifyProducerState(); if (this.transactionInitialized) { throw new IllegalStateException("MockProducer has already been initialized for transactions."); } this.transactionInitialized = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
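A small sketch, assuming only the MockProducer API listed above, of guarding against the double-initialization error the test provokes; transactionInitialized() lets callers check first instead of catching IllegalStateException.

MockProducer<String, String> mock = new MockProducer<>();
// initTransactions() may be called at most once per instance.
if (!mock.transactionInitialized()) {
    mock.initTransactions();
}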
@Test(expected = IllegalStateException.class) public void shouldThrowOnBeginTransactionIfTransactionsNotInitialized() { producer.beginTransaction(); }
@Override public void beginTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); this.transactionInFlight = true; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; }
MockProducer implements Producer<K, V> { @Override public void beginTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); this.transactionInFlight = true; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; } }
MockProducer implements Producer<K, V> { @Override public void beginTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); this.transactionInFlight = true; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); }
MockProducer implements Producer<K, V> { @Override public void beginTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); this.transactionInFlight = true; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
MockProducer implements Producer<K, V> { @Override public void beginTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); this.transactionInFlight = true; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
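For context, the ordering the focal method enforces, sketched against the same API: initTransactions() must complete before beginTransaction() is legal.

MockProducer<String, String> mock = new MockProducer<>();
mock.initTransactions();   // required first; otherwise beginTransaction() throws IllegalStateException
mock.beginTransaction();   // marks a transaction as in flight
// ... send records, then commitTransaction() or abortTransaction()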
@Test(expected = IllegalStateException.class) public void shouldThrowOnSendOffsetsToTransactionIfTransactionsNotInitialized() { producer.sendOffsetsToTransaction(null, null); }
@Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); Objects.requireNonNull(consumerGroupId); if (offsets.size() == 0) { return; } Map<TopicPartition, OffsetAndMetadata> uncommittedOffsets = this.uncommittedConsumerGroupOffsets.get(consumerGroupId); if (uncommittedOffsets == null) { uncommittedOffsets = new HashMap<>(); this.uncommittedConsumerGroupOffsets.put(consumerGroupId, uncommittedOffsets); } uncommittedOffsets.putAll(offsets); this.sentOffsets = true; }
MockProducer implements Producer<K, V> { @Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); Objects.requireNonNull(consumerGroupId); if (offsets.size() == 0) { return; } Map<TopicPartition, OffsetAndMetadata> uncommittedOffsets = this.uncommittedConsumerGroupOffsets.get(consumerGroupId); if (uncommittedOffsets == null) { uncommittedOffsets = new HashMap<>(); this.uncommittedConsumerGroupOffsets.put(consumerGroupId, uncommittedOffsets); } uncommittedOffsets.putAll(offsets); this.sentOffsets = true; } }
MockProducer implements Producer<K, V> { @Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); Objects.requireNonNull(consumerGroupId); if (offsets.size() == 0) { return; } Map<TopicPartition, OffsetAndMetadata> uncommittedOffsets = this.uncommittedConsumerGroupOffsets.get(consumerGroupId); if (uncommittedOffsets == null) { uncommittedOffsets = new HashMap<>(); this.uncommittedConsumerGroupOffsets.put(consumerGroupId, uncommittedOffsets); } uncommittedOffsets.putAll(offsets); this.sentOffsets = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); }
MockProducer implements Producer<K, V> { @Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); Objects.requireNonNull(consumerGroupId); if (offsets.size() == 0) { return; } Map<TopicPartition, OffsetAndMetadata> uncommittedOffsets = this.uncommittedConsumerGroupOffsets.get(consumerGroupId); if (uncommittedOffsets == null) { uncommittedOffsets = new HashMap<>(); this.uncommittedConsumerGroupOffsets.put(consumerGroupId, uncommittedOffsets); } uncommittedOffsets.putAll(offsets); this.sentOffsets = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
MockProducer implements Producer<K, V> { @Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); Objects.requireNonNull(consumerGroupId); if (offsets.size() == 0) { return; } Map<TopicPartition, OffsetAndMetadata> uncommittedOffsets = this.uncommittedConsumerGroupOffsets.get(consumerGroupId); if (uncommittedOffsets == null) { uncommittedOffsets = new HashMap<>(); this.uncommittedConsumerGroupOffsets.put(consumerGroupId, uncommittedOffsets); } uncommittedOffsets.putAll(offsets); this.sentOffsets = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
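A hedged sketch of attaching a non-empty offsets map to an open transaction, mirroring the real producer's transactional lifecycle; the topic, partition, offset, and group id are illustrative. Note that the focal method returns early for an empty map and rejects a null consumer group id via Objects.requireNonNull.

MockProducer<String, String> mock = new MockProducer<>();
mock.initTransactions();
mock.beginTransaction();
Map<TopicPartition, OffsetAndMetadata> offsets =
        Collections.singletonMap(new TopicPartition("input", 0), new OffsetAndMetadata(42L));
mock.sendOffsetsToTransaction(offsets, "my-group");   // buffered until commitTransaction()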
@Test(expected = IllegalStateException.class) public void shouldThrowOnCommitIfTransactionsNotInitialized() { producer.commitTransaction(); }
@Override public void commitTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.sent.addAll(this.uncommittedSends); if (!this.uncommittedConsumerGroupOffsets.isEmpty()) this.consumerGroupOffsets.add(this.uncommittedConsumerGroupOffsets); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets = new HashMap<>(); this.transactionCommitted = true; this.transactionAborted = false; this.transactionInFlight = false; ++this.commitCount; }
MockProducer implements Producer<K, V> { @Override public void commitTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.sent.addAll(this.uncommittedSends); if (!this.uncommittedConsumerGroupOffsets.isEmpty()) this.consumerGroupOffsets.add(this.uncommittedConsumerGroupOffsets); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets = new HashMap<>(); this.transactionCommitted = true; this.transactionAborted = false; this.transactionInFlight = false; ++this.commitCount; } }
MockProducer implements Producer<K, V> { @Override public void commitTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.sent.addAll(this.uncommittedSends); if (!this.uncommittedConsumerGroupOffsets.isEmpty()) this.consumerGroupOffsets.add(this.uncommittedConsumerGroupOffsets); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets = new HashMap<>(); this.transactionCommitted = true; this.transactionAborted = false; this.transactionInFlight = false; ++this.commitCount; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); }
MockProducer implements Producer<K, V> { @Override public void commitTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.sent.addAll(this.uncommittedSends); if (!this.uncommittedConsumerGroupOffsets.isEmpty()) this.consumerGroupOffsets.add(this.uncommittedConsumerGroupOffsets); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets = new HashMap<>(); this.transactionCommitted = true; this.transactionAborted = false; this.transactionInFlight = false; ++this.commitCount; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
MockProducer implements Producer<K, V> { @Override public void commitTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.sent.addAll(this.uncommittedSends); if (!this.uncommittedConsumerGroupOffsets.isEmpty()) this.consumerGroupOffsets.add(this.uncommittedConsumerGroupOffsets); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets = new HashMap<>(); this.transactionCommitted = true; this.transactionAborted = false; this.transactionInFlight = false; ++this.commitCount; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
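A hedged companion-test sketch of the commit path: records sent inside the transaction only become visible through history() (and offsets through consumerGroupOffsetsHistory()) after commitTransaction(). The StringSerializer-based constructor, topic name, and values are illustrative.

@Test
public void shouldExposeCommittedSendsThroughHistory() {
    MockProducer<String, String> mock =
            new MockProducer<>(true, new StringSerializer(), new StringSerializer());
    mock.initTransactions();
    mock.beginTransaction();
    mock.send(new ProducerRecord<>("topic", "key", "value"));
    mock.commitTransaction();                  // moves uncommittedSends into the visible history
    assertEquals(1, mock.history().size());
    assertEquals(1L, mock.commitCount());
}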
@Test(expected = IllegalStateException.class) public void shouldThrowOnAbortIfTransactionsNotInitialized() { producer.abortTransaction(); }
@Override public void abortTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets.clear(); this.transactionCommitted = false; this.transactionAborted = true; this.transactionInFlight = false; }
MockProducer implements Producer<K, V> { @Override public void abortTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets.clear(); this.transactionCommitted = false; this.transactionAborted = true; this.transactionInFlight = false; } }
MockProducer implements Producer<K, V> { @Override public void abortTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets.clear(); this.transactionCommitted = false; this.transactionAborted = true; this.transactionInFlight = false; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); }
MockProducer implements Producer<K, V> { @Override public void abortTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets.clear(); this.transactionCommitted = false; this.transactionAborted = true; this.transactionInFlight = false; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
MockProducer implements Producer<K, V> { @Override public void abortTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets.clear(); this.transactionCommitted = false; this.transactionAborted = true; this.transactionInFlight = false; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
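And the abort path, sketched the same way: aborted sends and offsets are discarded rather than promoted to history().

@Test
public void shouldDropAbortedSendsFromHistory() {
    MockProducer<String, String> mock =
            new MockProducer<>(true, new StringSerializer(), new StringSerializer());
    mock.initTransactions();
    mock.beginTransaction();
    mock.send(new ProducerRecord<>("topic", "dropped"));
    mock.abortTransaction();                   // clears uncommitted sends and offsets
    assertTrue(mock.history().isEmpty());
    assertTrue(mock.transactionAborted());
}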
@Test(expected = IllegalStateException.class) public void shouldThrowFenceProducerIfTransactionsNotInitialized() { producer.fenceProducer(); }
public void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; }
MockProducer implements Producer<K, V> { public void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; } }
MockProducer implements Producer<K, V> { public void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); }
MockProducer implements Producer<K, V> { public void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
MockProducer implements Producer<K, V> { public void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
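fenceProducer() marks the mock as fenced; a hedged sketch of how a caller might exercise that, on the assumption that subsequent transactional calls are rejected with ProducerFencedException as they are on the real producer.

MockProducer<String, String> mock = new MockProducer<>();
mock.initTransactions();
mock.fenceProducer();              // simulate another instance claiming the same transactional id
try {
    mock.beginTransaction();       // expected to be rejected while fenced
} catch (ProducerFencedException e) {
    // handle fencing: stop sending from this instance and fail over
}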
@Test public void shouldThrowOnCloseIfProducerIsClosed() { producer.close(); try { producer.close(); fail("Should have thrown as producer is already closed"); } catch (IllegalStateException e) { } }
@Override public void close() { close(0, null); }
MockProducer implements Producer<K, V> { @Override public void close() { close(0, null); } }
MockProducer implements Producer<K, V> { @Override public void close() { close(0, null); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); }
MockProducer implements Producer<K, V> { @Override public void close() { close(0, null); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
MockProducer implements Producer<K, V> { @Override public void close() { close(0, null); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
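close() simply delegates to close(0, null), and a second call fails as the test shows; closed() offers a non-throwing guard.

MockProducer<String, String> mock = new MockProducer<>();
if (!mock.closed()) {     // closing an already-closed MockProducer throws IllegalStateException
    mock.close();
}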
@Test public void shouldBeFlushedIfNoBufferedRecords() { assertTrue(producer.flushed()); }
public boolean flushed() { return this.completions.isEmpty(); }
MockProducer implements Producer<K, V> { public boolean flushed() { return this.completions.isEmpty(); } }
MockProducer implements Producer<K, V> { public boolean flushed() { return this.completions.isEmpty(); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); }
MockProducer implements Producer<K, V> { public boolean flushed() { return this.completions.isEmpty(); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
MockProducer implements Producer<K, V> { public boolean flushed() { return this.completions.isEmpty(); } MockProducer(final Cluster cluster, final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete, final Partitioner partitioner, final Serializer<K> keySerializer, final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }
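flushed() is simply "no pending completions"; a sketch, assuming the autoComplete=false constructor, of how a buffered send flips it until completeNext() or flush() runs.

MockProducer<String, String> mock =
        new MockProducer<>(false, new StringSerializer(), new StringSerializer());
assertTrue(mock.flushed());                         // nothing buffered yet
mock.send(new ProducerRecord<>("topic", "value"));
assertFalse(mock.flushed());                        // one completion pending
mock.completeNext();                                // or mock.flush()
assertTrue(mock.flushed());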
@Test public void shortToJson() { JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT16_SCHEMA, (short) 12)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"int16\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(12, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).intValue()); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
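A hedged configuration sketch for the converter under test: the "schemas.enable" setting controls whether fromConnectData wraps the payload in the schema/payload envelope that the test asserts on, and toConnectData reverses the conversion.

JsonConverter converter = new JsonConverter();
converter.configure(Collections.singletonMap("schemas.enable", "true"), false);   // false = value converter

byte[] serialized = converter.fromConnectData("topic", Schema.INT16_SCHEMA, (short) 12);
SchemaAndValue roundTripped = converter.toConnectData("topic", serialized);        // back to (schema, value)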
@Test(expected = IllegalStateException.class) public void testFailIfNotReadyForSendNoProducerId() { transactionManager.failIfNotReadyForSend(); }
public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
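failIfNotReadyForSend applies the producer-id and state checks only when the manager is transactional; the test above expects an IllegalStateException precisely because initTransactions has never completed. A sketch of that failure in the same JUnit 4 style, placed in the producer internals package so the constructor is reachable regardless of its exact visibility; the transactional id, timeout, and backoff values are illustrative assumptions:

    // Sketch: a transactional manager that has not completed initTransactions
    // has no producer id, so the readiness check fails fast.
    package org.apache.kafka.clients.producer.internals;

    import org.junit.Test;

    public class ReadyForSendSketchTest {
        @Test(expected = IllegalStateException.class)
        public void transactionalSendBeforeInitTransactions() {
            // 60000 ms transaction timeout, 100 ms retry backoff: illustrative values only.
            TransactionManager manager = new TransactionManager("sketch-txn-id", 60000, 100L);
            manager.failIfNotReadyForSend();
        }
    }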
@Test public void testFailIfNotReadyForSendIdempotentProducer() { TransactionManager idempotentTransactionManager = new TransactionManager(); idempotentTransactionManager.failIfNotReadyForSend(); }
public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
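By contrast, the no-argument constructor produces an idempotent-but-not-transactional manager (no transactional id), so only the hasError() branch of failIfNotReadyForSend applies and the call returns normally, as the test above shows. A small sketch of that contrast, under the same package-placement assumption:

    // Sketch: an idempotent-only manager passes the readiness check even though
    // it has not yet been assigned a producer id.
    package org.apache.kafka.clients.producer.internals;

    import static org.junit.Assert.assertFalse;
    import org.junit.Test;

    public class IdempotentReadyForSendSketchTest {
        @Test
        public void idempotentSendIsAlwaysReady() {
            TransactionManager idempotent = new TransactionManager();
            assertFalse(idempotent.isTransactional()); // no transactional id configured
            idempotent.failIfNotReadyForSend();        // no exception: transactional checks are skipped
        }
    }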
@Test(expected = IllegalStateException.class) public void testFailIfNotReadyForSendNoOngoingTransaction() { long pid = 13131L; short epoch = 1; doInitTransactions(pid, epoch); transactionManager.failIfNotReadyForSend(); }
public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); }
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
@Test public void testMaybeAddPartitionToTransaction() { long pid = 13131L; short epoch = 1; TopicPartition partition = new TopicPartition("foo", 0); doInitTransactions(pid, epoch); transactionManager.beginTransaction(); transactionManager.maybeAddPartitionToTransaction(partition); assertTrue(transactionManager.hasPartitionsToAdd()); assertFalse(transactionManager.isPartitionAdded(partition)); assertTrue(transactionManager.isPartitionPendingAdd(partition)); prepareAddPartitionsToTxn(partition, Errors.NONE); sender.run(time.milliseconds()); assertFalse(transactionManager.hasPartitionsToAdd()); assertTrue(transactionManager.isPartitionAdded(partition)); assertFalse(transactionManager.isPartitionPendingAdd(partition)); transactionManager.maybeAddPartitionToTransaction(partition); assertFalse(transactionManager.hasPartitionsToAdd()); assertTrue(transactionManager.isPartitionAdded(partition)); assertFalse(transactionManager.isPartitionPendingAdd(partition)); }
public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
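The bookkeeping exercised by testMaybeAddPartitionToTransaction is effectively a two-set state machine: a partition is pending once requested, becomes added when the broker confirms the AddPartitionsToTxn request, and further calls for the same partition return early. The following is a simplified standalone model of that logic, not the real class, built only on JDK collections:

    // Simplified model of the pending/added bookkeeping (illustration only).
    import java.util.HashSet;
    import java.util.Set;

    public class PartitionAddModel {
        private final Set<String> pending = new HashSet<>();
        private final Set<String> added = new HashSet<>();

        synchronized void maybeAdd(String partition) {
            if (added.contains(partition) || pending.contains(partition))
                return;                    // already confirmed or already queued: no-op
            pending.add(partition);        // will ride on the next AddPartitionsToTxn request
        }

        synchronized void confirm(String partition) {
            if (pending.remove(partition)) // a successful broker response moves it over
                added.add(partition);
        }

        public static void main(String[] args) {
            PartitionAddModel model = new PartitionAddModel();
            model.maybeAdd("foo-0");
            model.maybeAdd("foo-0");       // duplicate while pending: ignored
            model.confirm("foo-0");
            model.maybeAdd("foo-0");       // duplicate after confirmation: ignored
            System.out.println("pending=" + model.pending + " added=" + model.added);
        }
    }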
@Test(expected = IllegalStateException.class) public void testMaybeAddPartitionToTransactionBeforeInitTransactions() { transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0)); }
public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
@Test(expected = IllegalStateException.class) public void testMaybeAddPartitionToTransactionBeforeBeginTransaction() { long pid = 13131L; short epoch = 1; doInitTransactions(pid, epoch); transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0)); }
public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); }
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
@Test(expected = IllegalStateException.class) public void testInvalidSequenceIncrement() { TransactionManager transactionManager = new TransactionManager(); transactionManager.incrementSequenceNumber(tp0, 3333); }
synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequenceNumber = sequenceNumbers.get(topicPartition); if (currentSequenceNumber == null) throw new IllegalStateException("Attempt to increment sequence number for a partition with no current sequence."); currentSequenceNumber += increment; sequenceNumbers.put(topicPartition, currentSequenceNumber); }
TransactionManager { synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequenceNumber = sequenceNumbers.get(topicPartition); if (currentSequenceNumber == null) throw new IllegalStateException("Attempt to increment sequence number for a partition with no current sequence."); currentSequenceNumber += increment; sequenceNumbers.put(topicPartition, currentSequenceNumber); } }
TransactionManager { synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequenceNumber = sequenceNumbers.get(topicPartition); if (currentSequenceNumber == null) throw new IllegalStateException("Attempt to increment sequence number for a partition with no current sequence."); currentSequenceNumber += increment; sequenceNumbers.put(topicPartition, currentSequenceNumber); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); }
TransactionManager { synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequenceNumber = sequenceNumbers.get(topicPartition); if (currentSequenceNumber == null) throw new IllegalStateException("Attempt to increment sequence number for a partition with no current sequence."); currentSequenceNumber += increment; sequenceNumbers.put(topicPartition, currentSequenceNumber); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); }
TransactionManager { synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequenceNumber = sequenceNumbers.get(topicPartition); if (currentSequenceNumber == null) throw new IllegalStateException("Attempt to increment sequence number for a partition with no current sequence."); currentSequenceNumber += increment; sequenceNumbers.put(topicPartition, currentSequenceNumber); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
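incrementSequenceNumber deliberately refuses to start a sequence implicitly: incrementing a partition that has never been assigned a sequence indicates a caller bug, so it fails instead of defaulting to zero. A sketch mirroring the test above, placed in the producer internals package because the method is package-private:

    // Sketch: incrementing an unknown partition's sequence is rejected.
    package org.apache.kafka.clients.producer.internals;

    import org.apache.kafka.common.TopicPartition;
    import org.junit.Test;

    public class SequenceIncrementSketchTest {
        @Test(expected = IllegalStateException.class)
        public void incrementWithoutExistingSequenceFails() {
            TransactionManager manager = new TransactionManager(); // no sequences assigned yet
            manager.incrementSequenceNumber(new TopicPartition("foo", 0), 1);
        }
    }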
@Test public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedException { final long pid = 13131L; final short epoch = 1; doInitTransactions(pid, epoch); transactionManager.beginTransaction(); accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT); Node node1 = new Node(0, "localhost", 1111); PartitionInfo part1 = new PartitionInfo(topic, 0, node1, null, null); Cluster cluster = new Cluster(null, Arrays.asList(node1), Arrays.asList(part1), Collections.<String>emptySet(), Collections.<String>emptySet()); Set<Node> nodes = new HashSet<>(); nodes.add(node1); Map<Integer, List<ProducerBatch>> drainedBatches = accumulator.drain(cluster, nodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(drainedBatches.containsKey(node1.id())); assertTrue(drainedBatches.get(node1.id()).isEmpty()); }
public synchronized void beginTransaction() { ensureTransactional(); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); }
TransactionManager { public synchronized void beginTransaction() { ensureTransactional(); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); } }
TransactionManager { public synchronized void beginTransaction() { ensureTransactional(); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); }
TransactionManager { public synchronized void beginTransaction() { ensureTransactional(); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); }
TransactionManager { public synchronized void beginTransaction() { ensureTransactional(); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }
@Test public void testDelayedAllocation() throws Exception { BufferPool pool = new BufferPool(5 * 1024, 1024, metrics, time, metricGroup); ByteBuffer buffer = pool.allocate(1024, maxBlockTimeMs); CountDownLatch doDealloc = asyncDeallocate(pool, buffer); CountDownLatch allocation = asyncAllocate(pool, 5 * 1024); assertEquals("Allocation shouldn't have happened yet, waiting on memory.", 1L, allocation.getCount()); doDealloc.countDown(); assertTrue("Allocation should succeed soon after de-allocation", allocation.await(1, TimeUnit.SECONDS)); }
public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { if (size > this.totalMemory) throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations."); this.lock.lock(); try { if (size == poolableSize && !this.free.isEmpty()) return this.free.pollFirst(); int freeListSize = freeSize() * this.poolableSize; if (this.availableMemory + freeListSize >= size) { freeUp(size); ByteBuffer allocatedBuffer = allocateByteBuffer(size); this.availableMemory -= size; return allocatedBuffer; } else { int accumulated = 0; ByteBuffer buffer = null; boolean hasError = true; Condition moreMemory = this.lock.newCondition(); try { long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs); this.waiters.addLast(moreMemory); while (accumulated < size) { long startWaitNs = time.nanoseconds(); long timeNs; boolean waitingTimeElapsed; try { waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS); } finally { long endWaitNs = time.nanoseconds(); timeNs = Math.max(0L, endWaitNs - startWaitNs); this.waitTime.record(timeNs, time.milliseconds()); } if (waitingTimeElapsed) { throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms."); } remainingTimeToBlockNs -= timeNs; if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) { buffer = this.free.pollFirst(); accumulated = size; } else { freeUp(size - accumulated); int got = (int) Math.min(size - accumulated, this.availableMemory); this.availableMemory -= got; accumulated += got; } } if (buffer == null) buffer = allocateByteBuffer(size); hasError = false; return buffer; } finally { if (hasError) this.availableMemory += accumulated; this.waiters.remove(moreMemory); } } } finally { try { if (!(this.availableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty()) this.waiters.peekFirst().signal(); } finally { lock.unlock(); } } }
BufferPool { public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { if (size > this.totalMemory) throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations."); this.lock.lock(); try { if (size == poolableSize && !this.free.isEmpty()) return this.free.pollFirst(); int freeListSize = freeSize() * this.poolableSize; if (this.availableMemory + freeListSize >= size) { freeUp(size); ByteBuffer allocatedBuffer = allocateByteBuffer(size); this.availableMemory -= size; return allocatedBuffer; } else { int accumulated = 0; ByteBuffer buffer = null; boolean hasError = true; Condition moreMemory = this.lock.newCondition(); try { long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs); this.waiters.addLast(moreMemory); while (accumulated < size) { long startWaitNs = time.nanoseconds(); long timeNs; boolean waitingTimeElapsed; try { waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS); } finally { long endWaitNs = time.nanoseconds(); timeNs = Math.max(0L, endWaitNs - startWaitNs); this.waitTime.record(timeNs, time.milliseconds()); } if (waitingTimeElapsed) { throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms."); } remainingTimeToBlockNs -= timeNs; if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) { buffer = this.free.pollFirst(); accumulated = size; } else { freeUp(size - accumulated); int got = (int) Math.min(size - accumulated, this.availableMemory); this.availableMemory -= got; accumulated += got; } } if (buffer == null) buffer = allocateByteBuffer(size); hasError = false; return buffer; } finally { if (hasError) this.availableMemory += accumulated; this.waiters.remove(moreMemory); } } } finally { try { if (!(this.availableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty()) this.waiters.peekFirst().signal(); } finally { lock.unlock(); } } } }
BufferPool { public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { if (size > this.totalMemory) throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations."); this.lock.lock(); try { if (size == poolableSize && !this.free.isEmpty()) return this.free.pollFirst(); int freeListSize = freeSize() * this.poolableSize; if (this.availableMemory + freeListSize >= size) { freeUp(size); ByteBuffer allocatedBuffer = allocateByteBuffer(size); this.availableMemory -= size; return allocatedBuffer; } else { int accumulated = 0; ByteBuffer buffer = null; boolean hasError = true; Condition moreMemory = this.lock.newCondition(); try { long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs); this.waiters.addLast(moreMemory); while (accumulated < size) { long startWaitNs = time.nanoseconds(); long timeNs; boolean waitingTimeElapsed; try { waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS); } finally { long endWaitNs = time.nanoseconds(); timeNs = Math.max(0L, endWaitNs - startWaitNs); this.waitTime.record(timeNs, time.milliseconds()); } if (waitingTimeElapsed) { throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms."); } remainingTimeToBlockNs -= timeNs; if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) { buffer = this.free.pollFirst(); accumulated = size; } else { freeUp(size - accumulated); int got = (int) Math.min(size - accumulated, this.availableMemory); this.availableMemory -= got; accumulated += got; } } if (buffer == null) buffer = allocateByteBuffer(size); hasError = false; return buffer; } finally { if (hasError) this.availableMemory += accumulated; this.waiters.remove(moreMemory); } } } finally { try { if (!(this.availableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty()) this.waiters.peekFirst().signal(); } finally { lock.unlock(); } } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); }
BufferPool { public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { if (size > this.totalMemory) throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations."); this.lock.lock(); try { if (size == poolableSize && !this.free.isEmpty()) return this.free.pollFirst(); int freeListSize = freeSize() * this.poolableSize; if (this.availableMemory + freeListSize >= size) { freeUp(size); ByteBuffer allocatedBuffer = allocateByteBuffer(size); this.availableMemory -= size; return allocatedBuffer; } else { int accumulated = 0; ByteBuffer buffer = null; boolean hasError = true; Condition moreMemory = this.lock.newCondition(); try { long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs); this.waiters.addLast(moreMemory); while (accumulated < size) { long startWaitNs = time.nanoseconds(); long timeNs; boolean waitingTimeElapsed; try { waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS); } finally { long endWaitNs = time.nanoseconds(); timeNs = Math.max(0L, endWaitNs - startWaitNs); this.waitTime.record(timeNs, time.milliseconds()); } if (waitingTimeElapsed) { throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms."); } remainingTimeToBlockNs -= timeNs; if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) { buffer = this.free.pollFirst(); accumulated = size; } else { freeUp(size - accumulated); int got = (int) Math.min(size - accumulated, this.availableMemory); this.availableMemory -= got; accumulated += got; } } if (buffer == null) buffer = allocateByteBuffer(size); hasError = false; return buffer; } finally { if (hasError) this.availableMemory += accumulated; this.waiters.remove(moreMemory); } } } finally { try { if (!(this.availableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty()) this.waiters.peekFirst().signal(); } finally { lock.unlock(); } } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deallocate(ByteBuffer buffer, int size); void deallocate(ByteBuffer buffer); long availableMemory(); long unallocatedMemory(); int queued(); int poolableSize(); long totalMemory(); }
BufferPool { public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { if (size > this.totalMemory) throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations."); this.lock.lock(); try { if (size == poolableSize && !this.free.isEmpty()) return this.free.pollFirst(); int freeListSize = freeSize() * this.poolableSize; if (this.availableMemory + freeListSize >= size) { freeUp(size); ByteBuffer allocatedBuffer = allocateByteBuffer(size); this.availableMemory -= size; return allocatedBuffer; } else { int accumulated = 0; ByteBuffer buffer = null; boolean hasError = true; Condition moreMemory = this.lock.newCondition(); try { long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs); this.waiters.addLast(moreMemory); while (accumulated < size) { long startWaitNs = time.nanoseconds(); long timeNs; boolean waitingTimeElapsed; try { waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS); } finally { long endWaitNs = time.nanoseconds(); timeNs = Math.max(0L, endWaitNs - startWaitNs); this.waitTime.record(timeNs, time.milliseconds()); } if (waitingTimeElapsed) { throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms."); } remainingTimeToBlockNs -= timeNs; if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) { buffer = this.free.pollFirst(); accumulated = size; } else { freeUp(size - accumulated); int got = (int) Math.min(size - accumulated, this.availableMemory); this.availableMemory -= got; accumulated += got; } } if (buffer == null) buffer = allocateByteBuffer(size); hasError = false; return buffer; } finally { if (hasError) this.availableMemory += accumulated; this.waiters.remove(moreMemory); } } } finally { try { if (!(this.availableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty()) this.waiters.peekFirst().signal(); } finally { lock.unlock(); } } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deallocate(ByteBuffer buffer, int size); void deallocate(ByteBuffer buffer); long availableMemory(); long unallocatedMemory(); int queued(); int poolableSize(); long totalMemory(); }
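testDelayedAllocation relies on allocate() parking the caller on a Condition until another thread returns memory. A runnable sketch of the same behaviour against real wall-clock time; the pool sizes, sleep, and timeouts are illustrative values, not taken from these rows:

    // Sketch: the second allocate() blocks until the releaser thread deallocates.
    import java.nio.ByteBuffer;
    import org.apache.kafka.clients.producer.internals.BufferPool;
    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.utils.Time;

    public class DelayedAllocationSketch {
        public static void main(String[] args) throws Exception {
            BufferPool pool = new BufferPool(2 * 1024, 1024, new Metrics(), Time.SYSTEM, "sketch");
            ByteBuffer first = pool.allocate(2 * 1024, 1000);  // takes all of the pool's memory
            Thread releaser = new Thread(() -> {
                try {
                    Thread.sleep(200);                          // simulate work holding the buffer
                    pool.deallocate(first);                     // frees memory and signals waiters
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                }
            });
            releaser.start();
            ByteBuffer second = pool.allocate(2 * 1024, 5000);  // parks until the deallocate above
            System.out.println("got " + second.capacity() + " bytes after waiting");
            releaser.join();
        }
    }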
@Test public void testStressfulSituation() throws Exception { int numThreads = 10; final int iterations = 50000; final int poolableSize = 1024; final long totalMemory = numThreads / 2 * poolableSize; final BufferPool pool = new BufferPool(totalMemory, poolableSize, metrics, time, metricGroup); List<StressTestThread> threads = new ArrayList<StressTestThread>(); for (int i = 0; i < numThreads; i++) threads.add(new StressTestThread(pool, iterations)); for (StressTestThread thread : threads) thread.start(); for (StressTestThread thread : threads) thread.join(); for (StressTestThread thread : threads) assertTrue("Thread should have completed all iterations successfully.", thread.success.get()); assertEquals(totalMemory, pool.availableMemory()); }
public long availableMemory() { lock.lock(); try { return this.availableMemory + freeSize() * (long) this.poolableSize; } finally { lock.unlock(); } }
BufferPool { public long availableMemory() { lock.lock(); try { return this.availableMemory + freeSize() * (long) this.poolableSize; } finally { lock.unlock(); } } }
BufferPool { public long availableMemory() { lock.lock(); try { return this.availableMemory + freeSize() * (long) this.poolableSize; } finally { lock.unlock(); } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); }
BufferPool { public long availableMemory() { lock.lock(); try { return this.availableMemory + freeSize() * (long) this.poolableSize; } finally { lock.unlock(); } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deallocate(ByteBuffer buffer, int size); void deallocate(ByteBuffer buffer); long availableMemory(); long unallocatedMemory(); int queued(); int poolableSize(); long totalMemory(); }
BufferPool { public long availableMemory() { lock.lock(); try { return this.availableMemory + freeSize() * (long) this.poolableSize; } finally { lock.unlock(); } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deallocate(ByteBuffer buffer, int size); void deallocate(ByteBuffer buffer); long availableMemory(); long unallocatedMemory(); int queued(); int poolableSize(); long totalMemory(); }
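availableMemory() is the sum of unallocated memory and the pooled buffers sitting on the free list, which is why testStressfulSituation can assert that the full total comes back once every thread has finished. A small sketch of the accounting (sizes are illustrative):

    // Sketch: a returned poolable buffer is still "available" even though it is
    // no longer "unallocated".
    import java.nio.ByteBuffer;
    import org.apache.kafka.clients.producer.internals.BufferPool;
    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.utils.Time;

    public class AvailableMemorySketch {
        public static void main(String[] args) throws Exception {
            BufferPool pool = new BufferPool(4 * 1024, 1024, new Metrics(), Time.SYSTEM, "sketch");
            System.out.println(pool.availableMemory());   // 4096: nothing handed out yet
            ByteBuffer buffer = pool.allocate(1024, 1000);
            System.out.println(pool.availableMemory());   // 3072 while the buffer is in use
            pool.deallocate(buffer);                      // poolable size, so it joins the free list
            System.out.println(pool.availableMemory());   // 4096 again: 3072 unallocated + one pooled buffer
            System.out.println(pool.unallocatedMemory()); // 3072: pooled buffers are excluded here
        }
    }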
@Test public void intToJson() { JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT32_SCHEMA, 12)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"int32\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(12, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).intValue()); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
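intToJson checks the envelope shape field by field; the same bytes can also be fed back through toConnectData to recover the schema and value, which is a quick way to see what the envelope buys. A sketch of that round trip (the JSON shape in the comment paraphrases what the test above asserts):

    // Sketch: envelope round trip through fromConnectData and toConnectData.
    import java.nio.charset.StandardCharsets;
    import java.util.Collections;
    import org.apache.kafka.connect.data.Schema;
    import org.apache.kafka.connect.data.SchemaAndValue;
    import org.apache.kafka.connect.json.JsonConverter;

    public class EnvelopeRoundTripSketch {
        public static void main(String[] args) {
            JsonConverter converter = new JsonConverter();
            converter.configure(Collections.singletonMap("schemas.enable", "true"), false);
            byte[] bytes = converter.fromConnectData("topic", Schema.INT32_SCHEMA, 12);
            // Roughly: {"schema":{"type":"int32","optional":false},"payload":12}
            System.out.println(new String(bytes, StandardCharsets.UTF_8));
            SchemaAndValue roundTrip = converter.toConnectData("topic", bytes);
            System.out.println(roundTrip.schema().type() + " -> " + roundTrip.value()); // INT32 -> 12
        }
    }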
@Test public void testAbortIncompleteBatches() throws Exception { long lingerMs = Long.MAX_VALUE; int numRecords = 100; final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0); final RecordAccumulator accum = new RecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs, 100L, metrics, time, new ApiVersions(), null); class TestCallback implements Callback { @Override public void onCompletion(RecordMetadata metadata, Exception exception) { assertTrue(exception.getMessage().equals("Producer is closed forcefully.")); numExceptionReceivedInCallback.incrementAndGet(); } } for (int i = 0; i < numRecords; i++) accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs); RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertFalse(result.readyNodes.isEmpty()); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(accum.hasUndrained()); assertTrue(accum.hasIncomplete()); int numDrainedRecords = 0; for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) { for (ProducerBatch batch : drainedEntry.getValue()) { assertTrue(batch.isClosed()); assertFalse(batch.produceFuture.completed()); numDrainedRecords += batch.recordCount; } } assertTrue(numDrainedRecords > 0 && numDrainedRecords < numRecords); accum.abortIncompleteBatches(); assertEquals(numRecords, numExceptionReceivedInCallback.get()); assertFalse(accum.hasUndrained()); assertFalse(accum.hasIncomplete()); }
public void abortIncompleteBatches() { do { abortBatches(); } while (appendsInProgress()); abortBatches(); this.batches.clear(); }
RecordAccumulator { public void abortIncompleteBatches() { do { abortBatches(); } while (appendsInProgress()); abortBatches(); this.batches.clear(); } }
RecordAccumulator { public void abortIncompleteBatches() { do { abortBatches(); } while (appendsInProgress()); abortBatches(); this.batches.clear(); } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); }
RecordAccumulator { public void abortIncompleteBatches() { do { abortBatches(); } while (appendsInProgress()); abortBatches(); this.batches.clear(); } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
RecordAccumulator { public void abortIncompleteBatches() { do { abortBatches(); } while (appendsInProgress()); abortBatches(); this.batches.clear(); } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
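abortIncompleteBatches is the path taken when the producer is closed forcefully: every appended-but-unsent record is failed and its callback is invoked with the error message the test above asserts. A sketch of that path with a single record; the sizes and timeouts are illustrative, and the null TransactionManager means neither idempotence nor transactions are in play:

    // Sketch: aborting incomplete batches fails pending records and fires callbacks.
    import org.apache.kafka.clients.ApiVersions;
    import org.apache.kafka.clients.producer.internals.RecordAccumulator;
    import org.apache.kafka.common.TopicPartition;
    import org.apache.kafka.common.metrics.Metrics;
    import org.apache.kafka.common.record.CompressionType;
    import org.apache.kafka.common.utils.Time;

    public class AbortIncompleteSketch {
        public static void main(String[] args) throws Exception {
            RecordAccumulator accumulator = new RecordAccumulator(
                    16 * 1024, 64 * 1024, CompressionType.NONE, 0L, 100L,
                    new Metrics(), Time.SYSTEM, new ApiVersions(), null);
            accumulator.append(new TopicPartition("topic", 0), Time.SYSTEM.milliseconds(),
                    "key".getBytes(), "value".getBytes(), null,
                    (metadata, exception) -> System.out.println("record failed: " + exception.getMessage()),
                    1000L);
            accumulator.abortIncompleteBatches(); // runs the callback with "Producer is closed forcefully."
        }
    }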
@Test public void testExpiredBatches() throws InterruptedException { long retryBackoffMs = 100L; long lingerMs = 3000L; int requestTimeout = 60; int batchSize = 1025; RecordAccumulator accum = new RecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, lingerMs, retryBackoffMs, metrics, time, new ApiVersions(), null); int appends = expectedNumAppends(batchSize); for (int i = 0; i < appends; i++) { accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs); assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); } accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); Set<Node> readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); time.sleep(requestTimeout + 1); accum.mutePartition(tp1); List<ProducerBatch> expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should not be expired when the partition is muted", 0, expiredBatches.size()); accum.unmutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should be expired", 1, expiredBatches.size()); assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); time.sleep(lingerMs); assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); time.sleep(requestTimeout + 1); accum.mutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should not be expired when metadata is still available and partition is muted", 0, expiredBatches.size()); accum.unmutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should be expired when the partition is not muted", 1, expiredBatches.size()); assertEquals("No partitions should be ready.", 0, accum.ready(cluster, time.milliseconds()).readyNodes.size()); accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); time.sleep(lingerMs); readyNodes = accum.ready(cluster, time.milliseconds()).readyNodes; assertEquals("Our partition's leader should be ready", Collections.singleton(node1), readyNodes); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals("There should be only one batch.", drained.get(node1.id()).size(), 1); time.sleep(1000L); accum.reenqueue(drained.get(node1.id()).get(0), time.milliseconds()); time.sleep(requestTimeout + retryBackoffMs); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should not be expired.", 0, expiredBatches.size()); time.sleep(1L); accum.mutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should not be expired when the partition is muted", 0, expiredBatches.size()); accum.unmutePartition(tp1); expiredBatches = accum.expiredBatches(requestTimeout, time.milliseconds()); assertEquals("The batch should be expired when the partition is not muted.", 1, expiredBatches.size()); }
public List<ProducerBatch> expiredBatches(int requestTimeout, long now) { List<ProducerBatch> expiredBatches = new ArrayList<>(); for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) { Deque<ProducerBatch> dq = entry.getValue(); TopicPartition tp = entry.getKey(); if (!muted.contains(tp)) { synchronized (dq) { ProducerBatch lastBatch = dq.peekLast(); Iterator<ProducerBatch> batchIterator = dq.iterator(); while (batchIterator.hasNext()) { ProducerBatch batch = batchIterator.next(); boolean isFull = batch != lastBatch || batch.isFull(); if (batch.maybeExpire(requestTimeout, retryBackoffMs, now, this.lingerMs, isFull)) { expiredBatches.add(batch); batchIterator.remove(); } else { break; } } } } } return expiredBatches; }
RecordAccumulator { public List<ProducerBatch> expiredBatches(int requestTimeout, long now) { List<ProducerBatch> expiredBatches = new ArrayList<>(); for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) { Deque<ProducerBatch> dq = entry.getValue(); TopicPartition tp = entry.getKey(); if (!muted.contains(tp)) { synchronized (dq) { ProducerBatch lastBatch = dq.peekLast(); Iterator<ProducerBatch> batchIterator = dq.iterator(); while (batchIterator.hasNext()) { ProducerBatch batch = batchIterator.next(); boolean isFull = batch != lastBatch || batch.isFull(); if (batch.maybeExpire(requestTimeout, retryBackoffMs, now, this.lingerMs, isFull)) { expiredBatches.add(batch); batchIterator.remove(); } else { break; } } } } } return expiredBatches; } }
RecordAccumulator { public List<ProducerBatch> expiredBatches(int requestTimeout, long now) { List<ProducerBatch> expiredBatches = new ArrayList<>(); for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) { Deque<ProducerBatch> dq = entry.getValue(); TopicPartition tp = entry.getKey(); if (!muted.contains(tp)) { synchronized (dq) { ProducerBatch lastBatch = dq.peekLast(); Iterator<ProducerBatch> batchIterator = dq.iterator(); while (batchIterator.hasNext()) { ProducerBatch batch = batchIterator.next(); boolean isFull = batch != lastBatch || batch.isFull(); if (batch.maybeExpire(requestTimeout, retryBackoffMs, now, this.lingerMs, isFull)) { expiredBatches.add(batch); batchIterator.remove(); } else { break; } } } } } return expiredBatches; } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); }
RecordAccumulator { public List<ProducerBatch> expiredBatches(int requestTimeout, long now) { List<ProducerBatch> expiredBatches = new ArrayList<>(); for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) { Deque<ProducerBatch> dq = entry.getValue(); TopicPartition tp = entry.getKey(); if (!muted.contains(tp)) { synchronized (dq) { ProducerBatch lastBatch = dq.peekLast(); Iterator<ProducerBatch> batchIterator = dq.iterator(); while (batchIterator.hasNext()) { ProducerBatch batch = batchIterator.next(); boolean isFull = batch != lastBatch || batch.isFull(); if (batch.maybeExpire(requestTimeout, retryBackoffMs, now, this.lingerMs, isFull)) { expiredBatches.add(batch); batchIterator.remove(); } else { break; } } } } } return expiredBatches; } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
RecordAccumulator { public List<ProducerBatch> expiredBatches(int requestTimeout, long now) { List<ProducerBatch> expiredBatches = new ArrayList<>(); for (Map.Entry<TopicPartition, Deque<ProducerBatch>> entry : this.batches.entrySet()) { Deque<ProducerBatch> dq = entry.getValue(); TopicPartition tp = entry.getKey(); if (!muted.contains(tp)) { synchronized (dq) { ProducerBatch lastBatch = dq.peekLast(); Iterator<ProducerBatch> batchIterator = dq.iterator(); while (batchIterator.hasNext()) { ProducerBatch batch = batchIterator.next(); boolean isFull = batch != lastBatch || batch.isFull(); if (batch.maybeExpire(requestTimeout, retryBackoffMs, now, this.lingerMs, isFull)) { expiredBatches.add(batch); batchIterator.remove(); } else { break; } } } } } return expiredBatches; } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
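The expiredBatches scan above walks each partition's deque from the head and stops at the first batch that has not expired, since every batch behind it is newer; it also skips muted partitions, which the sketch below omits. A minimal, self-contained illustration of that head-of-deque scan, using a plain age threshold in place of ProducerBatch.maybeExpire (all names here are illustrative, not Kafka's):

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

public class ExpiryScanSketch {
    // Minimal stand-in for a producer batch: just a creation timestamp.
    static final class Batch {
        final long createdMs;
        Batch(long createdMs) { this.createdMs = createdMs; }
        boolean expired(long nowMs, long timeoutMs) { return nowMs - createdMs > timeoutMs; }
    }

    // Scan each partition's deque from the head and remove batches past the timeout,
    // stopping at the first batch that is still fresh (everything behind it is newer).
    static List<Batch> expiredBatches(Map<String, Deque<Batch>> batchesByPartition,
                                      long timeoutMs, long nowMs) {
        List<Batch> expired = new ArrayList<>();
        for (Deque<Batch> dq : batchesByPartition.values()) {
            synchronized (dq) {
                Iterator<Batch> it = dq.iterator();
                while (it.hasNext()) {
                    Batch b = it.next();
                    if (b.expired(nowMs, timeoutMs)) {
                        expired.add(b);
                        it.remove();
                    } else {
                        break; // later batches are newer, no need to look further
                    }
                }
            }
        }
        return expired;
    }

    public static void main(String[] args) {
        Deque<Batch> dq = new ArrayDeque<>();
        dq.add(new Batch(0L));    // old, should expire
        dq.add(new Batch(900L));  // fresh, should survive
        Map<String, Deque<Batch>> batches = new HashMap<>();
        batches.put("topic-0", dq);
        System.out.println(expiredBatches(batches, 500L, 1000L).size() + " expired, " + dq.size() + " left");
    }
}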
@Test(expected = UnsupportedVersionException.class) public void testIdempotenceWithOldMagic() throws InterruptedException { ApiVersions apiVersions = new ApiVersions(); int batchSize = 1025; apiVersions.update("foobar", NodeApiVersions.create(Arrays.asList(new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2)))); RecordAccumulator accum = new RecordAccumulator(batchSize + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 10 * batchSize, CompressionType.NONE, 10, 100L, metrics, time, apiVersions, new TransactionManager()); accum.append(tp1, 0L, key, value, Record.EMPTY_HEADERS, null, 0); }
public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } }
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } }
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); }
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
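The append path in this row follows a double-checked pattern: try to append to the last batch while holding the deque lock, allocate a new buffer outside the lock if that fails (free.allocate may block), then re-check under the lock before starting a fresh batch, because another thread may have created one in the meantime; an unused buffer is handed back to the pool in the finally block. A small sketch of that pattern with illustrative types rather than Kafka's:

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class DoubleCheckedAppendSketch {
    // Stand-in for ProducerBatch: a bounded list of records.
    static final class Batch {
        final List<String> records = new ArrayList<>();
        final int capacity;
        Batch(int capacity) { this.capacity = capacity; }
        boolean tryAppend(String record) {
            if (records.size() >= capacity) return false; // no room, caller must roll a new batch
            records.add(record);
            return true;
        }
    }

    private final Deque<Batch> dq = new ArrayDeque<>();
    private final int batchCapacity;

    DoubleCheckedAppendSketch(int batchCapacity) { this.batchCapacity = batchCapacity; }

    void append(String record) {
        // First attempt: append to the current last batch under the lock.
        synchronized (dq) {
            Batch last = dq.peekLast();
            if (last != null && last.tryAppend(record)) return;
        }
        // "Allocate" outside the lock (in Kafka this is the potentially blocking buffer-pool allocation).
        Batch fresh = new Batch(batchCapacity);
        // Second attempt: another thread may have added a batch while we were allocating.
        synchronized (dq) {
            Batch last = dq.peekLast();
            if (last != null && last.tryAppend(record)) return; // reuse it; the fresh batch is discarded
            fresh.tryAppend(record);
            dq.addLast(fresh);
        }
    }

    public static void main(String[] args) {
        DoubleCheckedAppendSketch acc = new DoubleCheckedAppendSketch(2);
        for (int i = 0; i < 5; i++) acc.append("record-" + i);
        System.out.println("batches created: " + acc.dq.size()); // 5 records, capacity 2 -> 3 batches
    }
}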
@Test public void testSplitAndReenqueue() throws ExecutionException, InterruptedException { long now = time.milliseconds(); RecordAccumulator accum = new RecordAccumulator(1024, 10 * 1024, CompressionType.GZIP, 10, 100L, metrics, time, new ApiVersions(), null); ByteBuffer buffer = ByteBuffer.allocate(4096); MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); ProducerBatch batch = new ProducerBatch(tp1, builder, now, true); byte[] value = new byte[1024]; final AtomicInteger acked = new AtomicInteger(0); Callback cb = new Callback() { @Override public void onCompletion(RecordMetadata metadata, Exception exception) { acked.incrementAndGet(); } }; Future<RecordMetadata> future1 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now); Future<RecordMetadata> future2 = batch.tryAppend(now, null, value, Record.EMPTY_HEADERS, cb, now); assertNotNull(future1); assertNotNull(future2); batch.close(); accum.reenqueue(batch, now); time.sleep(101L); RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertTrue("The batch should be ready", result.readyNodes.size() > 0); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertEquals("Only node1 should be drained", 1, drained.size()); assertEquals("Only one batch should be drained", 1, drained.get(node1.id()).size()); accum.splitAndReenqueue(drained.get(node1.id()).get(0)); time.sleep(101L); drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertFalse(drained.isEmpty()); assertFalse(drained.get(node1.id()).isEmpty()); drained.get(node1.id()).get(0).done(acked.get(), 100L, null); assertEquals("The first message should have been acked.", 1, acked.get()); assertTrue(future1.isDone()); assertEquals(0, future1.get().offset()); drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertFalse(drained.isEmpty()); assertFalse(drained.get(node1.id()).isEmpty()); drained.get(node1.id()).get(0).done(acked.get(), 100L, null); assertEquals("Both message should have been acked.", 2, acked.get()); assertTrue(future2.isDone()); assertEquals(1, future2.get().offset()); }
public int splitAndReenqueue(ProducerBatch bigBatch) { CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, Math.max(1.0f, (float) bigBatch.compressionRatio())); Deque<ProducerBatch> dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); while (!dq.isEmpty()) { ProducerBatch batch = dq.pollLast(); incomplete.add(batch); synchronized (partitionDequeue) { partitionDequeue.addFirst(batch); } } return numSplitBatches; }
RecordAccumulator { public int splitAndReenqueue(ProducerBatch bigBatch) { CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, Math.max(1.0f, (float) bigBatch.compressionRatio())); Deque<ProducerBatch> dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); while (!dq.isEmpty()) { ProducerBatch batch = dq.pollLast(); incomplete.add(batch); synchronized (partitionDequeue) { partitionDequeue.addFirst(batch); } } return numSplitBatches; } }
RecordAccumulator { public int splitAndReenqueue(ProducerBatch bigBatch) { CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, Math.max(1.0f, (float) bigBatch.compressionRatio())); Deque<ProducerBatch> dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); while (!dq.isEmpty()) { ProducerBatch batch = dq.pollLast(); incomplete.add(batch); synchronized (partitionDequeue) { partitionDequeue.addFirst(batch); } } return numSplitBatches; } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); }
RecordAccumulator { public int splitAndReenqueue(ProducerBatch bigBatch) { CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, Math.max(1.0f, (float) bigBatch.compressionRatio())); Deque<ProducerBatch> dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); while (!dq.isEmpty()) { ProducerBatch batch = dq.pollLast(); incomplete.add(batch); synchronized (partitionDequeue) { partitionDequeue.addFirst(batch); } } return numSplitBatches; } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
RecordAccumulator { public int splitAndReenqueue(ProducerBatch bigBatch) { CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression, Math.max(1.0f, (float) bigBatch.compressionRatio())); Deque<ProducerBatch> dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); Deque<ProducerBatch> partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); while (!dq.isEmpty()) { ProducerBatch batch = dq.pollLast(); incomplete.add(batch); synchronized (partitionDequeue) { partitionDequeue.addFirst(batch); } } return numSplitBatches; } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
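splitAndReenqueue above pushes the split pieces back onto the front of the partition deque, polling the split deque from its tail so that the pieces land ahead of any newer batches in their original order. A standalone sketch of that reordering step (illustrative types only, with lists of strings standing in for ProducerBatch):

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Deque;
import java.util.List;

public class SplitReenqueueSketch {
    // Split an oversized record list into fixed-size chunks, in order.
    static Deque<List<String>> split(List<String> big, int chunkSize) {
        Deque<List<String>> chunks = new ArrayDeque<>();
        for (int i = 0; i < big.size(); i += chunkSize)
            chunks.addLast(big.subList(i, Math.min(big.size(), i + chunkSize)));
        return chunks;
    }

    // Re-enqueue the chunks at the head of the partition deque, preserving their order:
    // polling the split deque from its tail and adding each piece at the front leaves
    // the first chunk first, ahead of batches that were already queued.
    static void reenqueue(Deque<List<String>> chunks, Deque<List<String>> partitionDeque) {
        while (!chunks.isEmpty())
            partitionDeque.addFirst(chunks.pollLast());
    }

    public static void main(String[] args) {
        Deque<List<String>> partitionDeque = new ArrayDeque<>();
        partitionDeque.add(Arrays.asList("newer-batch"));
        reenqueue(split(Arrays.asList("a", "b", "c", "d", "e"), 2), partitionDeque);
        System.out.println(partitionDeque); // [[a, b], [c, d], [e], [newer-batch]]
    }
}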
@Test public void testSplitFrequency() throws InterruptedException { long seed = System.currentTimeMillis(); Random random = new Random(); random.setSeed(seed); final int batchSize = 1024; final int numMessages = 1000; RecordAccumulator accum = new RecordAccumulator(batchSize, 3 * 1024, CompressionType.GZIP, 10, 100L, metrics, time, new ApiVersions(), null); for (int goodCompRatioPercentage = 1; goodCompRatioPercentage < 100; goodCompRatioPercentage++) { int numSplit = 0; int numBatches = 0; CompressionRatioEstimator.resetEstimation(topic); for (int i = 0; i < numMessages; i++) { int dice = random.nextInt(100); byte[] value = (dice < goodCompRatioPercentage) ? bytesWithGoodCompression(random) : bytesWithPoorCompression(random, 100); accum.append(tp1, 0L, null, value, Record.EMPTY_HEADERS, null, 0); BatchDrainedResult result = completeOrSplitBatches(accum, batchSize); numSplit += result.numSplit; numBatches += result.numBatches; } time.sleep(10); BatchDrainedResult result = completeOrSplitBatches(accum, batchSize); numSplit += result.numSplit; numBatches += result.numBatches; assertTrue(String.format("Total num batches = %d, split batches = %d, more than 10%% of the batch splits. " + "Random seed is " + seed, numBatches, numSplit), (double) numSplit / numBatches < 0.1f); } }
public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } }
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } }
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); }
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
RecordAccumulator { public RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock) throws InterruptedException { appendsInProgress.incrementAndGet(); ByteBuffer buffer = null; if (headers == null) headers = Record.EMPTY_HEADERS; try { Deque<ProducerBatch> dq = getOrCreateDeque(tp); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) return appendResult; } byte maxUsableMagic = apiVersions.maxUsableProduceMagic(); int size = Math.max(this.batchSize, AbstractRecords.estimateSizeInBytesUpperBound(maxUsableMagic, compression, key, value, headers)); log.trace("Allocating a new {} byte message buffer for topic {} partition {}", size, tp.topic(), tp.partition()); buffer = free.allocate(size, maxTimeToBlock); synchronized (dq) { if (closed) throw new IllegalStateException("Cannot send after the producer is closed."); RecordAppendResult appendResult = tryAppend(timestamp, key, value, headers, callback, dq); if (appendResult != null) { return appendResult; } MemoryRecordsBuilder recordsBuilder = recordsBuilder(buffer, maxUsableMagic); ProducerBatch batch = new ProducerBatch(tp, recordsBuilder, time.milliseconds()); FutureRecordMetadata future = Utils.notNull(batch.tryAppend(timestamp, key, value, headers, callback, time.milliseconds())); dq.addLast(batch); incomplete.add(batch); buffer = null; return new RecordAppendResult(future, dq.size() > 1 || batch.isFull(), true); } } finally { if (buffer != null) free.deallocate(buffer); appendsInProgress.decrementAndGet(); } } RecordAccumulator(int batchSize, long totalSize, CompressionType compression, long lingerMs, long retryBackoffMs, Metrics metrics, Time time, ApiVersions apiVersions, TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp, long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster, Set<Node> nodes, int maxSize, long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }
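testSplitFrequency above asserts that, once the per-topic compression ratio estimate adapts, fewer than 10% of batches need to be split. The CompressionRatioEstimator the accumulator consults is, roughly, a moving per-topic estimate that gets bumped up when batches turn out larger than predicted (splitAndReenqueue pins it to at least 1.0 after a split) and drifts back down while batches keep compressing well. The sketch below is a simplified stand-in for that idea; the real class, its defaults, and its step sizes differ:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class RatioEstimatorSketch {
    // ratio = estimated compressed size / uncompressed size, tracked per topic.
    private final Map<String, Float> estimate = new ConcurrentHashMap<>();
    private final float deteriorateStep; // applied when a batch came out bigger than estimated
    private final float improveStep;     // applied when a batch compressed better than estimated

    RatioEstimatorSketch(float deteriorateStep, float improveStep) {
        this.deteriorateStep = deteriorateStep;
        this.improveStep = improveStep;
    }

    float estimation(String topic) {
        return estimate.getOrDefault(topic, 1.0f); // start pessimistic: assume no compression gain
    }

    // Called with the observed ratio after a batch is closed.
    void update(String topic, float observedRatio) {
        float current = estimation(topic);
        float next = observedRatio > current
                ? Math.min(1.0f, current + deteriorateStep) // under-estimated: back off quickly
                : Math.max(0.0f, current - improveStep);    // over-estimated: creep down slowly
        estimate.put(topic, next);
    }

    public static void main(String[] args) {
        RatioEstimatorSketch est = new RatioEstimatorSketch(0.05f, 0.005f);
        for (int i = 0; i < 50; i++) est.update("t", 0.3f); // a run of batches compressing to 30%
        System.out.printf("estimate after good batches: %.3f%n", est.estimation("t"));
        est.update("t", 1.2f); // one incompressible batch
        System.out.printf("after one bad batch: %.3f%n", est.estimation("t"));
    }
}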
@Test public void testChecksumNullForMagicV2() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now); assertNotNull(future); assertNull(future.checksumOrNull()); }
public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
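tryAppend above returns null when the builder has no room, telling the caller to roll a new batch, and otherwise carries the per-record checksum, which is itself null for message format v2 where only a batch-level CRC exists, into the record's future. A tiny standalone sketch of that null-means-no-room contract with an optional checksum (illustrative names, not Kafka's API; a plain CRC32 over the value bytes stands in for the real record checksum):

import java.util.ArrayList;
import java.util.List;
import java.util.zip.CRC32;

public class TryAppendSketch {
    // Stand-in for FutureRecordMetadata: carries an optional per-record checksum.
    static final class PendingRecord {
        final int offsetInBatch;
        final Long checksumOrNull; // null for formats that only have a batch-level CRC
        PendingRecord(int offsetInBatch, Long checksumOrNull) {
            this.offsetInBatch = offsetInBatch;
            this.checksumOrNull = checksumOrNull;
        }
    }

    static final class BatchSketch {
        private final List<byte[]> records = new ArrayList<>();
        private final int capacity;
        private final boolean perRecordChecksum; // false models the magic v2 behaviour
        BatchSketch(int capacity, boolean perRecordChecksum) {
            this.capacity = capacity;
            this.perRecordChecksum = perRecordChecksum;
        }
        // Returns null when there is no room; the caller must start a new batch.
        PendingRecord tryAppend(byte[] value) {
            if (records.size() >= capacity) return null;
            records.add(value);
            Long checksum = null;
            if (perRecordChecksum) {
                CRC32 crc = new CRC32();
                crc.update(value);
                checksum = crc.getValue();
            }
            return new PendingRecord(records.size() - 1, checksum);
        }
    }

    public static void main(String[] args) {
        BatchSketch v2Like = new BatchSketch(1, false);
        PendingRecord r = v2Like.tryAppend("payload".getBytes());
        System.out.println("offset=" + r.offsetInBatch + ", checksum=" + r.checksumOrNull); // checksum=null
        System.out.println("second append fits? " + (v2Like.tryAppend("x".getBytes()) != null)); // false
    }
}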
@Test public void testAppendedChecksumMagicV0AndV1() { for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1)) { MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(128), magic, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now); byte[] key = "hi".getBytes(); byte[] value = "there".getBytes(); FutureRecordMetadata future = batch.tryAppend(now, key, value, Record.EMPTY_HEADERS, null, now); assertNotNull(future); byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME); long expectedChecksum = LegacyRecord.computeChecksum(magic, attributes, now, key, value); assertEquals(expectedChecksum, future.checksumOrNull().longValue()); } }
public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
@Test public void testLargeLingerOldNowExpire() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); assertFalse(batch.maybeExpire(10240, 100L, now - 2L, Long.MAX_VALUE, false)); }
boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
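maybeExpire above distinguishes three cases: a full, never-retried batch expires by time since its last append; a non-full, never-retried batch expires by time since creation minus linger; a retried batch expires by time waited minus the retry backoff. A compact standalone sketch of that decision table, with plain longs in place of ProducerBatch state and Math.max(0, ...) mirroring createdTimeMs/waitedTimeMs (names are illustrative):

public class BatchExpirySketch {
    // Returns a human-readable reason if the batch should be expired, or null if it is still in its window.
    static String expiryReason(boolean inRetry, boolean isFull,
                               long requestTimeoutMs, long retryBackoffMs, long lingerMs,
                               long nowMs, long createdMs, long lastAppendMs, long lastAttemptMs) {
        long age = Math.max(0, nowMs - createdMs);        // mirrors ProducerBatch.createdTimeMs(now)
        long waited = Math.max(0, nowMs - lastAttemptMs); // mirrors ProducerBatch.waitedTimeMs(now)
        if (!inRetry && isFull && requestTimeoutMs < nowMs - lastAppendMs)
            return (nowMs - lastAppendMs) + " ms since last append";
        if (!inRetry && requestTimeoutMs < age - lingerMs)
            return (age - lingerMs) + " ms past creation plus linger";
        if (inRetry && requestTimeoutMs < waited - retryBackoffMs)
            return (waited - retryBackoffMs) + " ms past last attempt plus backoff";
        return null;
    }

    public static void main(String[] args) {
        // A full, never-retried batch last appended 150 ms ago against a 100 ms request timeout: expires.
        System.out.println(expiryReason(false, true, 100, 100, 10, 1000, 800, 850, 850));
        // The "large linger" case from the test above: a huge linger keeps a non-full batch alive (prints null).
        System.out.println(expiryReason(false, false, 10240, 100, Long.MAX_VALUE, 998, 1000, 1000, 1000));
    }
}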
@Test public void testLargeFullOldNowExpire() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); assertFalse(batch.maybeExpire(10240, 10240L, now - 2L, 10240L, true)); }
boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }
@Test public void testKeyPartitionIsStable() { int partition = partitioner.partition("test", null, keyBytes, null, null, cluster); assertEquals("Same key should yield same partition", partition, partitioner.partition("test", null, keyBytes, null, null, cluster)); }
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } void configure(Map<String, ?> configs); int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster); void close(); }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } void configure(Map<String, ?> configs); int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster); void close(); }
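testKeyPartitionIsStable above only asserts that the same key maps to the same partition. The sketch below (a hypothetical KeyedPartitionSketch, not the real DefaultPartitioner) reproduces the keyed branch with Arrays.hashCode standing in for Kafka's internal murmur2; the hash values differ from Kafka's, but the property being tested, that identical key bytes always yield the same partition index, holds for any deterministic hash combined with the sign-bit mask and modulo.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;

public class KeyedPartitionSketch {

    // Same idea as Utils.toPositive: clear the sign bit so the modulo result is never negative.
    static int toPositive(int number) {
        return number & 0x7fffffff;
    }

    // Keyed branch only: hash the key bytes and map them onto the partition count.
    static int partitionForKey(byte[] keyBytes, int numPartitions) {
        return toPositive(Arrays.hashCode(keyBytes)) % numPartitions;
    }

    public static void main(String[] args) {
        byte[] key = "order-42".getBytes(StandardCharsets.UTF_8);
        int first = partitionForKey(key, 6);
        int second = partitionForKey(key, 6);
        System.out.println(first == second); // true: the same key always lands on the same partition
    }
}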
@Test public void longToJson() { JsonNode converted = parse(converter.fromConnectData(TOPIC, Schema.INT64_SCHEMA, 4398046511104L)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"int64\", \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); assertEquals(4398046511104L, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).longValue()); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
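With schemas enabled, longToJson expects an envelope whose schema field describes a non-optional int64 and whose payload field carries the raw long. The hand-built Jackson sketch below shows that wire shape, assuming ENVELOPE_SCHEMA_FIELD_NAME and ENVELOPE_PAYLOAD_FIELD_NAME resolve to "schema" and "payload"; it constructs the JSON directly rather than going through JsonConverter.

import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class EnvelopeSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();

        // Schema half of the envelope: an int64 that is not optional.
        ObjectNode schema = mapper.createObjectNode();
        schema.put("type", "int64");
        schema.put("optional", false);

        // Payload half: the raw long value used by the test.
        ObjectNode envelope = mapper.createObjectNode();
        envelope.set("schema", schema);
        envelope.put("payload", 4398046511104L);

        // Prints: {"schema":{"type":"int64","optional":false},"payload":4398046511104}
        System.out.println(mapper.writeValueAsString(envelope));
    }
}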
@Test public void testRoundRobinWithUnavailablePartitions() { int countForPart0 = 0; int countForPart2 = 0; for (int i = 1; i <= 100; i++) { int part = partitioner.partition("test", null, null, null, null, cluster); assertTrue("We should never choose a leader-less node in round robin", part == 0 || part == 2); if (part == 0) countForPart0++; else countForPart2++; } assertEquals("The distribution between two available partitions should be even", countForPart0, countForPart2); }
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } void configure(Map<String, ?> configs); int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster); void close(); }
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } void configure(Map<String, ?> configs); int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster); void close(); }
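testRoundRobinWithUnavailablePartitions exercises the key-less branch: the per-topic counter is reduced modulo the number of available partitions, so a partition without a leader (partition 1 in the test's cluster) is never chosen and the remaining two are hit equally often. The sketch below (a hypothetical RoundRobinSketch) isolates that counter-and-modulo logic, taking a caller-supplied list of available partition ids in place of a Cluster object.

import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

public class RoundRobinSketch {
    private final Map<String, AtomicInteger> counters = new ConcurrentHashMap<>();

    // Key-less branch only: advance a per-topic counter and pick among the available partitions.
    int partition(String topic, List<Integer> availablePartitionIds) {
        AtomicInteger counter = counters.computeIfAbsent(
                topic, t -> new AtomicInteger(ThreadLocalRandom.current().nextInt()));
        int next = counter.getAndIncrement();
        int index = (next & 0x7fffffff) % availablePartitionIds.size(); // toPositive + modulo
        return availablePartitionIds.get(index);
    }

    public static void main(String[] args) {
        RoundRobinSketch sketch = new RoundRobinSketch();
        int countForPart0 = 0;
        int countForPart2 = 0;
        for (int i = 0; i < 100; i++) {
            int part = sketch.partition("test", Arrays.asList(0, 2)); // partition 1 has no leader
            if (part == 0) countForPart0++; else countForPart2++;
        }
        System.out.println(countForPart0 + " vs " + countForPart2); // 50 vs 50
    }
}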