# method2testcases
### Question:
Metadata { public synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now) { Objects.requireNonNull(cluster, "cluster should not be null"); this.needUpdate = false; this.lastRefreshMs = now; this.lastSuccessfulRefreshMs = now; this.version += 1; if (topicExpiryEnabled) { for (Iterator<Map.Entry<String, Long>> it = topics.entrySet().iterator(); it.hasNext(); ) { Map.Entry<String, Long> entry = it.next(); long expireMs = entry.getValue(); if (expireMs == TOPIC_EXPIRY_NEEDS_UPDATE) entry.setValue(now + TOPIC_EXPIRY_MS); else if (expireMs <= now) { it.remove(); log.debug("Removing unused topic {} from the metadata list, expiryMs {} now {}", entry.getKey(), expireMs, now); } } } for (Listener listener: listeners) listener.onMetadataUpdate(cluster, unavailableTopics); String previousClusterId = cluster.clusterResource().clusterId(); if (this.needMetadataForAllTopics) { this.needUpdate = false; this.cluster = getClusterForCurrentTopics(cluster); } else { this.cluster = cluster; } if (!cluster.isBootstrapConfigured()) { String clusterId = cluster.clusterResource().clusterId(); if (clusterId == null ? previousClusterId != null : !clusterId.equals(previousClusterId)) log.info("Cluster ID: {}", cluster.clusterResource().clusterId()); clusterResourceListeners.onUpdate(cluster.clusterResource()); } notifyAll(); log.debug("Updated cluster metadata version {} to {}", this.version, this.cluster); } Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation); Metadata(long refreshBackoffMs, long metadataExpireMs, boolean allowAutoTopicCreation,
boolean topicExpiryEnabled, ClusterResourceListeners clusterResourceListeners); synchronized Cluster fetch(); synchronized void add(String topic); synchronized long timeToNextUpdate(long nowMs); synchronized int requestUpdate(); synchronized boolean updateRequested(); synchronized void awaitUpdate(final int lastVersion, final long maxWaitMs); synchronized void setTopics(Collection<String> topics); synchronized Set<String> topics(); synchronized boolean containsTopic(String topic); synchronized void update(Cluster cluster, Set<String> unavailableTopics, long now); synchronized void failedUpdate(long now); synchronized int version(); synchronized long lastSuccessfulUpdate(); boolean allowAutoTopicCreation(); synchronized void needMetadataForAllTopics(boolean needMetadataForAllTopics); synchronized boolean needMetadataForAllTopics(); synchronized void addListener(Listener listener); synchronized void removeListener(Listener listener); static final long TOPIC_EXPIRY_MS; }### Answer:
@Test public void testClusterListenerGetsNotifiedOfUpdate() { long time = 0; MockClusterResourceListener mockClusterListener = new MockClusterResourceListener(); ClusterResourceListeners listeners = new ClusterResourceListeners(); listeners.maybeAdd(mockClusterListener); metadata = new Metadata(refreshBackoffMs, metadataExpireMs, true, false, listeners); String hostName = "www.example.com"; Cluster cluster = Cluster.bootstrap(Arrays.asList(new InetSocketAddress(hostName, 9002))); metadata.update(cluster, Collections.<String>emptySet(), time); assertFalse("ClusterResourceListener should not be called when metadata is updated with bootstrap Cluster", MockClusterResourceListener.IS_ON_UPDATE_CALLED.get()); metadata.update(new Cluster( "dummy", Arrays.asList(new Node(0, "host1", 1000)), Arrays.asList( new PartitionInfo("topic", 0, null, null, null), new PartitionInfo("topic1", 0, null, null, null)), Collections.<String>emptySet(), Collections.<String>emptySet()), Collections.<String>emptySet(), 100); assertEquals("MockClusterResourceListener did not get cluster metadata correctly", "dummy", mockClusterListener.clusterResource().clusterId()); assertTrue("MockClusterResourceListener should be called when metadata is updated with non-bootstrap Cluster", MockClusterResourceListener.IS_ON_UPDATE_CALLED.get()); } |
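Note on the pair of assertions above: `update()` invokes `clusterResourceListeners.onUpdate(...)` only for non-bootstrap clusters, and it logs the cluster ID only when it differs from the previous one. The null-safe comparison in the source is equivalent to an `Objects.equals` inequality; a minimal JDK-only sketch (class and method names here are illustrative, not from the Kafka source):

```java
import java.util.Objects;

// The cluster-ID change check in Metadata.update() is a null-safe inequality:
// it fires when the ID appears, disappears, or changes value.
final class ClusterIdChangeSketch {
    static boolean clusterIdChanged(String previousClusterId, String clusterId) {
        // Same truth table as: clusterId == null ? previousClusterId != null
        //                                        : !clusterId.equals(previousClusterId)
        return !Objects.equals(clusterId, previousClusterId);
    }

    public static void main(String[] args) {
        System.out.println(clusterIdChanged(null, "dummy"));    // true  -> logged
        System.out.println(clusterIdChanged("dummy", "dummy")); // false -> silent
    }
}
```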
### Question:
RecordMetadata { @Deprecated public long checksum() { if (checksum == null) this.checksum = DefaultRecord.computePartialChecksum(timestamp, serializedKeySize, serializedValueSize); return this.checksum; } RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp,
Long checksum, int serializedKeySize, int serializedValueSize); @Deprecated RecordMetadata(TopicPartition topicPartition, long baseOffset, long relativeOffset, long timestamp,
long checksum, int serializedKeySize, int serializedValueSize); long offset(); long timestamp(); @Deprecated long checksum(); int serializedKeySize(); int serializedValueSize(); String topic(); int partition(); @Override String toString(); static final int UNKNOWN_PARTITION; }### Answer:
@Test @SuppressWarnings("deprecation") public void testNullChecksum() { long timestamp = 2340234L; int keySize = 3; int valueSize = 5; RecordMetadata metadata = new RecordMetadata(new TopicPartition("foo", 0), 15L, 3L, timestamp, null, keySize, valueSize); assertEquals(DefaultRecord.computePartialChecksum(timestamp, keySize, valueSize), metadata.checksum()); } |
### Question:
MockProducer implements Producer<K, V> { @Override public void initTransactions() { verifyProducerState(); if (this.transactionInitialized) { throw new IllegalStateException("MockProducer has already been initialized for transactions."); } this.transactionInitialized = true; } MockProducer(final Cluster cluster,
final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }### Answer:
@Test public void shouldThrowOnInitTransactionIfProducerAlreadyInitializedForTransactions() { producer.initTransactions(); try { producer.initTransactions(); fail("Should have thrown as producer is already initialized"); } catch (IllegalStateException e) { } } |
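For contrast with the failure path, a happy-path sketch (not part of the test class above), assuming the no-arg `MockProducer()` constructor listed in the skeleton:

```java
import org.apache.kafka.clients.producer.MockProducer;

// The first initTransactions() call succeeds and flips the initialized flag;
// a second call would throw IllegalStateException, as the test verifies.
public class InitTransactionsOnce {
    public static void main(String[] args) {
        MockProducer<byte[], byte[]> producer = new MockProducer<>();
        producer.initTransactions();
        System.out.println(producer.transactionInitialized()); // true
    }
}
```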
### Question:
MockProducer implements Producer<K, V> { @Override public void beginTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); this.transactionInFlight = true; this.transactionCommitted = false; this.transactionAborted = false; this.sentOffsets = false; } MockProducer(final Cluster cluster,
final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldThrowOnBeginTransactionIfTransactionsNotInitialized() { producer.beginTransaction(); } |
### Question:
MockProducer implements Producer<K, V> { @Override public void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets, String consumerGroupId) throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); Objects.requireNonNull(consumerGroupId); if (offsets.size() == 0) { return; } Map<TopicPartition, OffsetAndMetadata> uncommittedOffsets = this.uncommittedConsumerGroupOffsets.get(consumerGroupId); if (uncommittedOffsets == null) { uncommittedOffsets = new HashMap<>(); this.uncommittedConsumerGroupOffsets.put(consumerGroupId, uncommittedOffsets); } uncommittedOffsets.putAll(offsets); this.sentOffsets = true; } MockProducer(final Cluster cluster,
final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldThrowOnSendOffsetsToTransactionIfTransactionsNotInitialized() { producer.sendOffsetsToTransaction(null, null); } |
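The test can pass `null` for both arguments because `verifyTransactionsInitialized()` runs before `Objects.requireNonNull(consumerGroupId)`, so the expected `IllegalStateException` wins. A hedged happy-path sketch, assuming (per the `verifyNoTransactionInFlight` guard above, which despite its name rejects the *absence* of an open transaction) that `beginTransaction()` must come first; the group name is illustrative:

```java
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.common.TopicPartition;

// Offsets sent inside an open transaction are tracked; sentOffsets() reports it.
public class SendOffsetsHappyPath {
    public static void main(String[] args) {
        MockProducer<byte[], byte[]> producer = new MockProducer<>();
        producer.initTransactions();
        producer.beginTransaction();
        Map<TopicPartition, OffsetAndMetadata> offsets = Collections.singletonMap(
                new TopicPartition("foo", 0), new OffsetAndMetadata(42L));
        producer.sendOffsetsToTransaction(offsets, "example-group"); // hypothetical group id
        System.out.println(producer.sentOffsets()); // true
    }
}
```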
### Question:
MockProducer implements Producer<K, V> { @Override public void commitTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.sent.addAll(this.uncommittedSends); if (!this.uncommittedConsumerGroupOffsets.isEmpty()) this.consumerGroupOffsets.add(this.uncommittedConsumerGroupOffsets); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets = new HashMap<>(); this.transactionCommitted = true; this.transactionAborted = false; this.transactionInFlight = false; ++this.commitCount; } MockProducer(final Cluster cluster,
final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldThrowOnCommitIfTransactionsNotInitialized() { producer.commitTransaction(); } |
### Question:
MockProducer implements Producer<K, V> { @Override public void abortTransaction() throws ProducerFencedException { verifyProducerState(); verifyTransactionsInitialized(); verifyNoTransactionInFlight(); flush(); this.uncommittedSends.clear(); this.uncommittedConsumerGroupOffsets.clear(); this.transactionCommitted = false; this.transactionAborted = true; this.transactionInFlight = false; } MockProducer(final Cluster cluster,
final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldThrowOnAbortIfTransactionsNotInitialized() { producer.abortTransaction(); } |
### Question:
MockProducer implements Producer<K, V> { public void fenceProducer() { verifyProducerState(); verifyTransactionsInitialized(); this.producerFenced = true; } MockProducer(final Cluster cluster,
final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldThrowFenceProducerIfTransactionsNotInitialized() { producer.fenceProducer(); } |
### Question:
MockProducer implements Producer<K, V> { @Override public void close() { close(0, null); } MockProducer(final Cluster cluster,
final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }### Answer:
@Test public void shouldThrowOnCloseIfProducerIsClosed() { producer.close(); try { producer.close(); fail("Should have thrown as producer is already closed"); } catch (IllegalStateException e) { } } |
### Question:
MockProducer implements Producer<K, V> { public boolean flushed() { return this.completions.isEmpty(); } MockProducer(final Cluster cluster,
final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(final boolean autoComplete,
final Partitioner partitioner,
final Serializer<K> keySerializer,
final Serializer<V> valueSerializer); MockProducer(); @Override void initTransactions(); @Override void beginTransaction(); @Override void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); @Override void commitTransaction(); @Override void abortTransaction(); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override synchronized Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); synchronized void flush(); List<PartitionInfo> partitionsFor(String topic); Map<MetricName, Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); boolean closed(); void fenceProducer(); boolean transactionInitialized(); boolean transactionInFlight(); boolean transactionCommitted(); boolean transactionAborted(); boolean flushed(); boolean sentOffsets(); long commitCount(); synchronized List<ProducerRecord<K, V>> history(); synchronized List<Map<String, Map<TopicPartition, OffsetAndMetadata>>> consumerGroupOffsetsHistory(); synchronized void clear(); synchronized boolean completeNext(); synchronized boolean errorNext(RuntimeException e); }### Answer:
@Test public void shouldBeFlushedIfNoBufferedRecords() { assertTrue(producer.flushed()); } |
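`flushed()` is simply `completions.isEmpty()`, so it flips to `false` once a send is buffered with auto-complete disabled, and back to `true` after `flush()`. A standalone sketch using the `(autoComplete, keySerializer, valueSerializer)` constructor from the skeleton:

```java
import org.apache.kafka.clients.producer.MockProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.ByteArraySerializer;

// With autoComplete = false, a send stays pending until flush() (or
// completeNext()/errorNext()) resolves it.
public class FlushedSketch {
    public static void main(String[] args) {
        MockProducer<byte[], byte[]> producer = new MockProducer<>(
                false, new ByteArraySerializer(), new ByteArraySerializer());
        System.out.println(producer.flushed()); // true: nothing buffered yet
        producer.send(new ProducerRecord<byte[], byte[]>("topic", "v".getBytes()));
        System.out.println(producer.flushed()); // false: one pending completion
        producer.flush();
        System.out.println(producer.flushed()); // true again
    }
}
```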
### Question:
TransactionManager { public synchronized void failIfNotReadyForSend() { if (hasError()) throw new KafkaException("Cannot perform send because at least one previous transactional or " + "idempotent request has failed with errors.", lastError); if (isTransactional()) { if (!hasProducerId()) throw new IllegalStateException("Cannot perform a 'send' before completing a call to initTransactions " + "when transactions are enabled."); if (currentState != State.IN_TRANSACTION) throw new IllegalStateException("Cannot call send in state " + currentState); } } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }### Answer:
@Test(expected = IllegalStateException.class) public void testFailIfNotReadyForSendNoProducerId() { transactionManager.failIfNotReadyForSend(); }
@Test public void testFailIfNotReadyForSendIdempotentProducer() { TransactionManager idempotentTransactionManager = new TransactionManager(); idempotentTransactionManager.failIfNotReadyForSend(); }
@Test(expected = IllegalStateException.class) public void testFailIfNotReadyForSendNoOngoingTransaction() { long pid = 13131L; short epoch = 1; doInitTransactions(pid, epoch); transactionManager.failIfNotReadyForSend(); } |
### Question:
TransactionManager { public synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition) { failIfNotReadyForSend(); if (isPartitionAdded(topicPartition) || isPartitionPendingAdd(topicPartition)) return; log.debug("{}Begin adding new partition {} to transaction", logPrefix, topicPartition); newPartitionsInTransaction.add(topicPartition); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }### Answer:
@Test public void testMaybeAddPartitionToTransaction() { long pid = 13131L; short epoch = 1; TopicPartition partition = new TopicPartition("foo", 0); doInitTransactions(pid, epoch); transactionManager.beginTransaction(); transactionManager.maybeAddPartitionToTransaction(partition); assertTrue(transactionManager.hasPartitionsToAdd()); assertFalse(transactionManager.isPartitionAdded(partition)); assertTrue(transactionManager.isPartitionPendingAdd(partition)); prepareAddPartitionsToTxn(partition, Errors.NONE); sender.run(time.milliseconds()); assertFalse(transactionManager.hasPartitionsToAdd()); assertTrue(transactionManager.isPartitionAdded(partition)); assertFalse(transactionManager.isPartitionPendingAdd(partition)); transactionManager.maybeAddPartitionToTransaction(partition); assertFalse(transactionManager.hasPartitionsToAdd()); assertTrue(transactionManager.isPartitionAdded(partition)); assertFalse(transactionManager.isPartitionPendingAdd(partition)); }
@Test(expected = IllegalStateException.class) public void testMaybeAddPartitionToTransactionBeforeInitTransactions() { transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0)); }
@Test(expected = IllegalStateException.class) public void testMaybeAddPartitionToTransactionBeforeBeginTransaction() { long pid = 13131L; short epoch = 1; doInitTransactions(pid, epoch); transactionManager.maybeAddPartitionToTransaction(new TopicPartition("foo", 0)); } |
### Question:
TransactionManager { synchronized void incrementSequenceNumber(TopicPartition topicPartition, int increment) { Integer currentSequenceNumber = sequenceNumbers.get(topicPartition); if (currentSequenceNumber == null) throw new IllegalStateException("Attempt to increment sequence number for a partition with no current sequence."); currentSequenceNumber += increment; sequenceNumbers.put(topicPartition, currentSequenceNumber); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }### Answer:
@Test(expected = IllegalStateException.class) public void testInvalidSequenceIncrement() { TransactionManager transactionManager = new TransactionManager(); transactionManager.incrementSequenceNumber(tp0, 3333); } |
### Question:
TransactionManager { public synchronized void beginTransaction() { ensureTransactional(); maybeFailWithError(); transitionTo(State.IN_TRANSACTION); } TransactionManager(String transactionalId, int transactionTimeoutMs, long retryBackoffMs); TransactionManager(); synchronized TransactionalRequestResult initializeTransactions(); synchronized void beginTransaction(); synchronized TransactionalRequestResult beginCommit(); synchronized TransactionalRequestResult beginAbort(); synchronized TransactionalRequestResult sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); synchronized void maybeAddPartitionToTransaction(TopicPartition topicPartition); synchronized void failIfNotReadyForSend(); String transactionalId(); boolean hasProducerId(); boolean isTransactional(); final String logPrefix; }### Answer:
@Test public void testRaiseErrorWhenNoPartitionsPendingOnDrain() throws InterruptedException { final long pid = 13131L; final short epoch = 1; doInitTransactions(pid, epoch); transactionManager.beginTransaction(); accumulator.append(tp0, time.milliseconds(), "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS, null, MAX_BLOCK_TIMEOUT); Node node1 = new Node(0, "localhost", 1111); PartitionInfo part1 = new PartitionInfo(topic, 0, node1, null, null); Cluster cluster = new Cluster(null, Arrays.asList(node1), Arrays.asList(part1), Collections.<String>emptySet(), Collections.<String>emptySet()); Set<Node> nodes = new HashSet<>(); nodes.add(node1); Map<Integer, List<ProducerBatch>> drainedBatches = accumulator.drain(cluster, nodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(drainedBatches.containsKey(node1.id())); assertTrue(drainedBatches.get(node1.id()).isEmpty()); } |
### Question:
BufferPool { public ByteBuffer allocate(int size, long maxTimeToBlockMs) throws InterruptedException { if (size > this.totalMemory) throw new IllegalArgumentException("Attempt to allocate " + size + " bytes, but there is a hard limit of " + this.totalMemory + " on memory allocations."); this.lock.lock(); try { if (size == poolableSize && !this.free.isEmpty()) return this.free.pollFirst(); int freeListSize = freeSize() * this.poolableSize; if (this.availableMemory + freeListSize >= size) { freeUp(size); ByteBuffer allocatedBuffer = allocateByteBuffer(size); this.availableMemory -= size; return allocatedBuffer; } else { int accumulated = 0; ByteBuffer buffer = null; boolean hasError = true; Condition moreMemory = this.lock.newCondition(); try { long remainingTimeToBlockNs = TimeUnit.MILLISECONDS.toNanos(maxTimeToBlockMs); this.waiters.addLast(moreMemory); while (accumulated < size) { long startWaitNs = time.nanoseconds(); long timeNs; boolean waitingTimeElapsed; try { waitingTimeElapsed = !moreMemory.await(remainingTimeToBlockNs, TimeUnit.NANOSECONDS); } finally { long endWaitNs = time.nanoseconds(); timeNs = Math.max(0L, endWaitNs - startWaitNs); this.waitTime.record(timeNs, time.milliseconds()); } if (waitingTimeElapsed) { throw new TimeoutException("Failed to allocate memory within the configured max blocking time " + maxTimeToBlockMs + " ms."); } remainingTimeToBlockNs -= timeNs; if (accumulated == 0 && size == this.poolableSize && !this.free.isEmpty()) { buffer = this.free.pollFirst(); accumulated = size; } else { freeUp(size - accumulated); int got = (int) Math.min(size - accumulated, this.availableMemory); this.availableMemory -= got; accumulated += got; } } if (buffer == null) buffer = allocateByteBuffer(size); hasError = false; return buffer; } finally { if (hasError) this.availableMemory += accumulated; this.waiters.remove(moreMemory); } } } finally { try { if (!(this.availableMemory == 0 && this.free.isEmpty()) && !this.waiters.isEmpty()) this.waiters.peekFirst().signal(); } finally { lock.unlock(); } } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deallocate(ByteBuffer buffer, int size); void deallocate(ByteBuffer buffer); long availableMemory(); long unallocatedMemory(); int queued(); int poolableSize(); long totalMemory(); }### Answer:
@Test public void testDelayedAllocation() throws Exception { BufferPool pool = new BufferPool(5 * 1024, 1024, metrics, time, metricGroup); ByteBuffer buffer = pool.allocate(1024, maxBlockTimeMs); CountDownLatch doDealloc = asyncDeallocate(pool, buffer); CountDownLatch allocation = asyncAllocate(pool, 5 * 1024); assertEquals("Allocation shouldn't have happened yet, waiting on memory.", 1L, allocation.getCount()); doDealloc.countDown(); assertTrue("Allocation should succeed soon after de-allocation", allocation.await(1, TimeUnit.SECONDS)); } |
### Question:
BufferPool { public long availableMemory() { lock.lock(); try { return this.availableMemory + freeSize() * (long) this.poolableSize; } finally { lock.unlock(); } } BufferPool(long memory, int poolableSize, Metrics metrics, Time time, String metricGrpName); ByteBuffer allocate(int size, long maxTimeToBlockMs); void deallocate(ByteBuffer buffer, int size); void deallocate(ByteBuffer buffer); long availableMemory(); long unallocatedMemory(); int queued(); int poolableSize(); long totalMemory(); }### Answer:
@Test public void testStressfulSituation() throws Exception { int numThreads = 10; final int iterations = 50000; final int poolableSize = 1024; final long totalMemory = numThreads / 2 * poolableSize; final BufferPool pool = new BufferPool(totalMemory, poolableSize, metrics, time, metricGroup); List<StressTestThread> threads = new ArrayList<StressTestThread>(); for (int i = 0; i < numThreads; i++) threads.add(new StressTestThread(pool, iterations)); for (StressTestThread thread : threads) thread.start(); for (StressTestThread thread : threads) thread.join(); for (StressTestThread thread : threads) assertTrue("Thread should have completed all iterations successfully.", thread.success.get()); assertEquals(totalMemory, pool.availableMemory()); } |
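Note that `totalMemory = numThreads / 2 * poolableSize` deliberately gives the pool only half as many poolable buffers as there are threads, forcing the stress test through the blocking/wait path. A minimal standalone usage sketch of the pool itself (the metric group name is illustrative):

```java
import java.nio.ByteBuffer;
import org.apache.kafka.clients.producer.internals.BufferPool;
import org.apache.kafka.common.metrics.Metrics;
import org.apache.kafka.common.utils.Time;

// Allocate one poolable buffer, return it, and confirm the pool reports
// all of its memory as available again.
public class BufferPoolSketch {
    public static void main(String[] args) throws InterruptedException {
        BufferPool pool = new BufferPool(2 * 1024, 1024, new Metrics(), Time.SYSTEM, "example-group");
        ByteBuffer buffer = pool.allocate(1024, 500); // block up to 500 ms if memory is exhausted
        pool.deallocate(buffer);
        System.out.println(pool.availableMemory()); // 2048
    }
}
```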
### Question:
RecordAccumulator { public void abortIncompleteBatches() { do { abortBatches(); } while (appendsInProgress()); abortBatches(); this.batches.clear(); } RecordAccumulator(int batchSize,
long totalSize,
CompressionType compression,
long lingerMs,
long retryBackoffMs,
Metrics metrics,
Time time,
ApiVersions apiVersions,
TransactionManager transactionManager); RecordAppendResult append(TopicPartition tp,
long timestamp,
byte[] key,
byte[] value,
Header[] headers,
Callback callback,
long maxTimeToBlock); List<ProducerBatch> expiredBatches(int requestTimeout, long now); void reenqueue(ProducerBatch batch, long now); int splitAndReenqueue(ProducerBatch bigBatch); ReadyCheckResult ready(Cluster cluster, long nowMs); boolean hasUndrained(); Map<Integer, List<ProducerBatch>> drain(Cluster cluster,
Set<Node> nodes,
int maxSize,
long now); void deallocate(ProducerBatch batch); void beginFlush(); void awaitFlushCompletion(); boolean hasIncomplete(); void abortIncompleteBatches(); void mutePartition(TopicPartition tp); void unmutePartition(TopicPartition tp); void close(); }### Answer:
@Test public void testAbortIncompleteBatches() throws Exception { long lingerMs = Long.MAX_VALUE; int numRecords = 100; final AtomicInteger numExceptionReceivedInCallback = new AtomicInteger(0); final RecordAccumulator accum = new RecordAccumulator(128 + DefaultRecordBatch.RECORD_BATCH_OVERHEAD, 64 * 1024, CompressionType.NONE, lingerMs, 100L, metrics, time, new ApiVersions(), null); class TestCallback implements Callback { @Override public void onCompletion(RecordMetadata metadata, Exception exception) { assertTrue(exception.getMessage().equals("Producer is closed forcefully.")); numExceptionReceivedInCallback.incrementAndGet(); } } for (int i = 0; i < numRecords; i++) accum.append(new TopicPartition(topic, i % 3), 0L, key, value, null, new TestCallback(), maxBlockTimeMs); RecordAccumulator.ReadyCheckResult result = accum.ready(cluster, time.milliseconds()); assertFalse(result.readyNodes.isEmpty()); Map<Integer, List<ProducerBatch>> drained = accum.drain(cluster, result.readyNodes, Integer.MAX_VALUE, time.milliseconds()); assertTrue(accum.hasUndrained()); assertTrue(accum.hasIncomplete()); int numDrainedRecords = 0; for (Map.Entry<Integer, List<ProducerBatch>> drainedEntry : drained.entrySet()) { for (ProducerBatch batch : drainedEntry.getValue()) { assertTrue(batch.isClosed()); assertFalse(batch.produceFuture.completed()); numDrainedRecords += batch.recordCount; } } assertTrue(numDrainedRecords > 0 && numDrainedRecords < numRecords); accum.abortIncompleteBatches(); assertEquals(numRecords, numExceptionReceivedInCallback.get()); assertFalse(accum.hasUndrained()); assertFalse(accum.hasIncomplete()); } |
### Question:
ProducerBatch { public FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now) { if (!recordsBuilder.hasRoomFor(timestamp, key, value, headers)) { return null; } else { Long checksum = this.recordsBuilder.append(timestamp, key, value, headers); this.maxRecordSize = Math.max(this.maxRecordSize, AbstractRecords.estimateSizeInBytesUpperBound(magic(), recordsBuilder.compressionType(), key, value, headers)); this.lastAppendTime = now; FutureRecordMetadata future = new FutureRecordMetadata(this.produceFuture, this.recordCount, timestamp, checksum, key == null ? -1 : key.length, value == null ? -1 : value.length); thunks.add(new Thunk(callback, future)); this.recordCount++; return future; } } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }### Answer:
@Test public void testChecksumNullForMagicV2() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10], Record.EMPTY_HEADERS, null, now); assertNotNull(future); assertNull(future.checksumOrNull()); }
@Test public void testAppendedChecksumMagicV0AndV1() { for (byte magic : Arrays.asList(MAGIC_VALUE_V0, MAGIC_VALUE_V1)) { MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(128), magic, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now); byte[] key = "hi".getBytes(); byte[] value = "there".getBytes(); FutureRecordMetadata future = batch.tryAppend(now, key, value, Record.EMPTY_HEADERS, null, now); assertNotNull(future); byte attributes = LegacyRecord.computeAttributes(magic, CompressionType.NONE, TimestampType.CREATE_TIME); long expectedChecksum = LegacyRecord.computeChecksum(magic, attributes, now, key, value); assertEquals(expectedChecksum, future.checksumOrNull().longValue()); } } |
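The two tests pin down the format split: magic v0/v1 records carry a per-record CRC, while magic v2 moved checksumming to the batch level, so `checksumOrNull()` returns `null`. A standalone sketch of the v2 case, built from the same builder call the v0/v1 test uses:

```java
import java.nio.ByteBuffer;
import org.apache.kafka.clients.producer.internals.FutureRecordMetadata;
import org.apache.kafka.clients.producer.internals.ProducerBatch;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

// Appending to a magic-v2 batch yields no per-record checksum.
public class MagicV2ChecksumSketch {
    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(128),
                RecordBatch.MAGIC_VALUE_V2, CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), builder, now);
        FutureRecordMetadata future = batch.tryAppend(now, null, new byte[10],
                Record.EMPTY_HEADERS, null, now);
        System.out.println(future.checksumOrNull()); // null under magic v2
    }
}
```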
### Question:
ProducerBatch { boolean maybeExpire(int requestTimeoutMs, long retryBackoffMs, long now, long lingerMs, boolean isFull) { if (!this.inRetry() && isFull && requestTimeoutMs < (now - this.lastAppendTime)) expiryErrorMessage = (now - this.lastAppendTime) + " ms has passed since last append"; else if (!this.inRetry() && requestTimeoutMs < (createdTimeMs(now) - lingerMs)) expiryErrorMessage = (createdTimeMs(now) - lingerMs) + " ms has passed since batch creation plus linger time"; else if (this.inRetry() && requestTimeoutMs < (waitedTimeMs(now) - retryBackoffMs)) expiryErrorMessage = (waitedTimeMs(now) - retryBackoffMs) + " ms has passed since last attempt plus backoff time"; boolean expired = expiryErrorMessage != null; if (expired) abortRecordAppends(); return expired; } ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now); ProducerBatch(TopicPartition tp, MemoryRecordsBuilder recordsBuilder, long now, boolean isSplitBatch); FutureRecordMetadata tryAppend(long timestamp, byte[] key, byte[] value, Header[] headers, Callback callback, long now); void abort(RuntimeException exception); void done(long baseOffset, long logAppendTime, RuntimeException exception); Deque<ProducerBatch> split(int splitBatchSize); boolean isCompressed(); @Override String toString(); boolean inRetry(); MemoryRecords records(); int sizeInBytes(); double compressionRatio(); boolean isFull(); void setProducerState(ProducerIdAndEpoch producerIdAndEpoch, int baseSequence, boolean isTransactional); void closeForRecordAppends(); void close(); void abortRecordAppends(); boolean isClosed(); ByteBuffer buffer(); int initialCapacity(); boolean isWritable(); byte magic(); long producerId(); short producerEpoch(); }### Answer:
@Test public void testLargeLingerOldNowExpire() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); assertFalse(batch.maybeExpire(10240, 100L, now - 2L, Long.MAX_VALUE, false)); }
@Test public void testLargeFullOldNowExpire() { ProducerBatch batch = new ProducerBatch(new TopicPartition("topic", 1), memoryRecordsBuilder, now); assertFalse(batch.maybeExpire(10240, 10240L, now - 2L, 10240L, true)); } |
### Question:
DefaultPartitioner implements Partitioner { public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) { List<PartitionInfo> partitions = cluster.partitionsForTopic(topic); int numPartitions = partitions.size(); if (keyBytes == null) { int nextValue = nextValue(topic); List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic); if (availablePartitions.size() > 0) { int part = Utils.toPositive(nextValue) % availablePartitions.size(); return availablePartitions.get(part).partition(); } else { return Utils.toPositive(nextValue) % numPartitions; } } else { return Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions; } } void configure(Map<String, ?> configs); int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster); void close(); }### Answer:
@Test public void testKeyPartitionIsStable() { int partition = partitioner.partition("test", null, keyBytes, null, null, cluster); assertEquals("Same key should yield same partition", partition, partitioner.partition("test", null, keyBytes, null, null, cluster)); }
@Test public void testRoundRobinWithUnavailablePartitions() { int countForPart0 = 0; int countForPart2 = 0; for (int i = 1; i <= 100; i++) { int part = partitioner.partition("test", null, null, null, null, cluster); assertTrue("We should never choose a leader-less node in round robin", part == 0 || part == 2); if (part == 0) countForPart0++; else countForPart2++; } assertEquals("The distribution between two available partitions should be even", countForPart0, countForPart2); }
@Test public void testRoundRobin() throws InterruptedException { final String topicA = "topicA"; final String topicB = "topicB"; List<PartitionInfo> allPartitions = asList(new PartitionInfo(topicA, 0, node0, nodes, nodes), new PartitionInfo(topicA, 1, node1, nodes, nodes), new PartitionInfo(topicA, 2, node2, nodes, nodes), new PartitionInfo(topicB, 0, node0, nodes, nodes) ); Cluster testCluster = new Cluster("clusterId", asList(node0, node1, node2), allPartitions, Collections.<String>emptySet(), Collections.<String>emptySet()); final Map<Integer, Integer> partitionCount = new HashMap<>(); for (int i = 0; i < 30; ++i) { int partition = partitioner.partition(topicA, null, null, null, null, testCluster); Integer count = partitionCount.get(partition); if (null == count) count = 0; partitionCount.put(partition, count + 1); if (i % 5 == 0) { partitioner.partition(topicB, null, null, null, null, testCluster); } } assertEquals(10, (int) partitionCount.get(0)); assertEquals(10, (int) partitionCount.get(1)); assertEquals(10, (int) partitionCount.get(2)); } |
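Keyed records bypass the round-robin counter entirely: the partition is `Utils.toPositive(Utils.murmur2(keyBytes)) % numPartitions`, which is why the first test expects a stable mapping. A standalone sketch using the same `Cluster`/`PartitionInfo` constructors as the tests (host, port, and key values are illustrative):

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.apache.kafka.clients.producer.internals.DefaultPartitioner;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;

// The same key bytes always hash to the same partition for a fixed partition count.
public class StableKeySketch {
    public static void main(String[] args) {
        Node node = new Node(0, "localhost", 9092);
        Node[] replicas = new Node[]{node};
        List<PartitionInfo> partitions = Arrays.asList(
                new PartitionInfo("test", 0, node, replicas, replicas),
                new PartitionInfo("test", 1, node, replicas, replicas));
        Cluster cluster = new Cluster("clusterId", Collections.singletonList(node),
                partitions, Collections.<String>emptySet(), Collections.<String>emptySet());
        DefaultPartitioner partitioner = new DefaultPartitioner();
        byte[] key = "user-42".getBytes();
        int first = partitioner.partition("test", null, key, null, null, cluster);
        int second = partitioner.partition("test", null, key, null, null, cluster);
        System.out.println(first == second); // true
    }
}
```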
### Question:
Sender implements Runnable { public static Sensor throttleTimeSensor(Metrics metrics) { String metricGrpName = SenderMetrics.METRIC_GROUP_NAME; Sensor produceThrottleTimeSensor = metrics.sensor("produce-throttle-time"); produceThrottleTimeSensor.add(metrics.metricName("produce-throttle-time-avg", metricGrpName, "The average throttle time in ms"), new Avg()); produceThrottleTimeSensor.add(metrics.metricName("produce-throttle-time-max", metricGrpName, "The maximum throttle time in ms"), new Max()); return produceThrottleTimeSensor; } Sender(KafkaClient client,
Metadata metadata,
RecordAccumulator accumulator,
boolean guaranteeMessageOrder,
int maxRequestSize,
short acks,
int retries,
Metrics metrics,
Time time,
int requestTimeout,
long retryBackoffMs,
TransactionManager transactionManager,
ApiVersions apiVersions); void run(); void initiateClose(); void forceClose(); void wakeup(); static Sensor throttleTimeSensor(Metrics metrics); }### Answer:
@Test public void testQuotaMetrics() throws Exception { MockSelector selector = new MockSelector(time); Sensor throttleTimeSensor = Sender.throttleTimeSensor(metrics); Cluster cluster = TestUtils.singletonCluster("test", 1); Node node = cluster.nodes().get(0); NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, time, true, new ApiVersions(), throttleTimeSensor); short apiVersionsResponseVersion = ApiKeys.API_VERSIONS.latestVersion(); ByteBuffer buffer = ApiVersionsResponse.createApiVersionsResponse(400, RecordBatch.CURRENT_MAGIC_VALUE).serialize(apiVersionsResponseVersion, new ResponseHeader(0)); selector.delayedReceive(new DelayedReceive(node.idString(), new NetworkReceive(node.idString(), buffer))); while (!client.ready(node, time.milliseconds())) client.poll(1, time.milliseconds()); selector.clear(); for (int i = 1; i <= 3; i++) { int throttleTimeMs = 100 * i; ProduceRequest.Builder builder = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) 1, 1000, Collections.<TopicPartition, MemoryRecords>emptyMap()); ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true, null); client.send(request, time.milliseconds()); client.poll(1, time.milliseconds()); ProduceResponse response = produceResponse(tp0, i, Errors.NONE, throttleTimeMs); buffer = response.serialize(ApiKeys.PRODUCE.latestVersion(), new ResponseHeader(request.correlationId())); selector.completeReceive(new NetworkReceive(node.idString(), buffer)); client.poll(1, time.milliseconds()); selector.clear(); } Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric avgMetric = allMetrics.get(metrics.metricName("produce-throttle-time-avg", METRIC_GROUP, "")); KafkaMetric maxMetric = allMetrics.get(metrics.metricName("produce-throttle-time-max", METRIC_GROUP, "")); assertEquals(250, avgMetric.value(), EPS); assertEquals(400, maxMetric.value(), EPS); client.close(); } |
### Question:
KafkaProducer implements Producer<K, V> { @Override public List<PartitionInfo> partitionsFor(String topic) { try { return waitOnMetadata(topic, null, maxBlockTimeMs).cluster.partitionsForTopic(topic); } catch (InterruptedException e) { throw new InterruptException(e); } } KafkaProducer(Map<String, Object> configs); KafkaProducer(Map<String, Object> configs, Serializer<K> keySerializer, Serializer<V> valueSerializer); KafkaProducer(Properties properties); KafkaProducer(Properties properties, Serializer<K> keySerializer, Serializer<V> valueSerializer); @SuppressWarnings({"unchecked", "deprecation"}) private KafkaProducer(ProducerConfig config, Serializer<K> keySerializer, Serializer<V> valueSerializer); void initTransactions(); void beginTransaction(); void sendOffsetsToTransaction(Map<TopicPartition, OffsetAndMetadata> offsets,
String consumerGroupId); void commitTransaction(); void abortTransaction(); @Override Future<RecordMetadata> send(ProducerRecord<K, V> record); @Override // Send a message, buffering it in the RecordAccumulator Future<RecordMetadata> send(ProducerRecord<K, V> record, Callback callback); @Override void flush(); @Override // Fetch the partition info for the given topic from Metadata List<PartitionInfo> partitionsFor(String topic); @Override Map<MetricName, ? extends Metric> metrics(); @Override void close(); @Override void close(long timeout, TimeUnit timeUnit); }### Answer:
@Test public void testTopicRefreshInMetadata() throws Exception { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG, "600000"); KafkaProducer<String, String> producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); long refreshBackoffMs = 500L; long metadataExpireMs = 60000L; final Metadata metadata = new Metadata(refreshBackoffMs, metadataExpireMs, true, true, new ClusterResourceListeners()); final Time time = new MockTime(); MemberModifier.field(KafkaProducer.class, "metadata").set(producer, metadata); MemberModifier.field(KafkaProducer.class, "time").set(producer, time); final String topic = "topic"; Thread t = new Thread() { @Override public void run() { long startTimeMs = System.currentTimeMillis(); for (int i = 0; i < 10; i++) { while (!metadata.updateRequested() && System.currentTimeMillis() - startTimeMs < 1000) yield(); metadata.update(Cluster.empty(), Collections.singleton(topic), time.milliseconds()); time.sleep(60 * 1000L); } } }; t.start(); try { producer.partitionsFor(topic); fail("Expect TimeoutException"); } catch (TimeoutException e) { } Assert.assertTrue("Topic should still exist in metadata", metadata.containsTopic(topic)); } |
### Question:
KafkaAdminClient extends AdminClient { static <K, V> List<V> getOrCreateListValue(Map<K, List<V>> map, K key) { List<V> list = map.get(key); if (list != null) return list; list = new LinkedList<>(); map.put(key, list); return list; } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata,
Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames,
DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }### Answer:
@Test public void testGetOrCreateListValue() { Map<String, List<String>> map = new HashMap<>(); List<String> fooList = KafkaAdminClient.getOrCreateListValue(map, "foo"); assertNotNull(fooList); fooList.add("a"); fooList.add("b"); List<String> fooList2 = KafkaAdminClient.getOrCreateListValue(map, "foo"); assertEquals(fooList, fooList2); assertTrue(fooList2.contains("a")); assertTrue(fooList2.contains("b")); List<String> barList = KafkaAdminClient.getOrCreateListValue(map, "bar"); assertNotNull(barList); assertTrue(barList.isEmpty()); } |
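`getOrCreateListValue` is the classic get-or-create map pattern; on Java 8 the same contract the test verifies collapses to `Map.computeIfAbsent`:

```java
import java.util.HashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;

// computeIfAbsent stores the list on first access and returns the existing
// one afterwards, matching the helper's behavior.
public class ComputeIfAbsentSketch {
    public static void main(String[] args) {
        Map<String, List<String>> map = new HashMap<>();
        map.computeIfAbsent("foo", k -> new LinkedList<>()).add("a");
        map.computeIfAbsent("foo", k -> new LinkedList<>()).add("b");
        System.out.println(map.get("foo")); // [a, b]
    }
}
```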
### Question:
KafkaAdminClient extends AdminClient { static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) { long deltaMs = deadlineMs - now; if (deltaMs > Integer.MAX_VALUE) deltaMs = Integer.MAX_VALUE; else if (deltaMs < Integer.MIN_VALUE) deltaMs = Integer.MIN_VALUE; return (int) deltaMs; } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata,
Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames,
DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }### Answer:
@Test public void testCalcTimeoutMsRemainingAsInt() { assertEquals(0, KafkaAdminClient.calcTimeoutMsRemainingAsInt(1000, 1000)); assertEquals(100, KafkaAdminClient.calcTimeoutMsRemainingAsInt(1000, 1100)); assertEquals(Integer.MAX_VALUE, KafkaAdminClient.calcTimeoutMsRemainingAsInt(0, Long.MAX_VALUE)); assertEquals(Integer.MIN_VALUE, KafkaAdminClient.calcTimeoutMsRemainingAsInt(Long.MAX_VALUE, 0)); } |
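The helper is a saturating long-to-int conversion: rather than letting the narrowing cast overflow, it clamps the delta to the `int` range, which is exactly what the four assertions check. A self-contained restatement of that logic:

```java
// Clamp the millisecond delta into int range before the narrowing cast.
final class SaturatingDeltaSketch {
    static int timeoutMsRemainingAsInt(long now, long deadlineMs) {
        long deltaMs = deadlineMs - now;
        if (deltaMs > Integer.MAX_VALUE) deltaMs = Integer.MAX_VALUE;
        else if (deltaMs < Integer.MIN_VALUE) deltaMs = Integer.MIN_VALUE;
        return (int) deltaMs;
    }

    public static void main(String[] args) {
        System.out.println(timeoutMsRemainingAsInt(1000, 1100));        // 100
        System.out.println(timeoutMsRemainingAsInt(0, Long.MAX_VALUE)); // 2147483647
        System.out.println(timeoutMsRemainingAsInt(Long.MAX_VALUE, 0)); // -2147483648
    }
}
```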
### Question:
KafkaAdminClient extends AdminClient { static String prettyPrintException(Throwable throwable) { if (throwable == null) return "Null exception."; if (throwable.getMessage() != null) { return throwable.getClass().getSimpleName() + ": " + throwable.getMessage(); } return throwable.getClass().getSimpleName(); } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata,
Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames,
DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }### Answer:
@Test public void testPrettyPrintException() { assertEquals("Null exception.", KafkaAdminClient.prettyPrintException(null)); assertEquals("TimeoutException", KafkaAdminClient.prettyPrintException(new TimeoutException())); assertEquals("TimeoutException: The foobar timed out.", KafkaAdminClient.prettyPrintException(new TimeoutException("The foobar timed out."))); } |
### Question:
KafkaAdminClient extends AdminClient { static String generateClientId(AdminClientConfig config) { String clientId = config.getString(AdminClientConfig.CLIENT_ID_CONFIG); if (!clientId.isEmpty()) return clientId; return "adminclient-" + ADMIN_CLIENT_ID_SEQUENCE.getAndIncrement(); } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata,
Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames,
DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }### Answer:
@Test public void testGenerateClientId() { Set<String> ids = new HashSet<>(); for (int i = 0; i < 10; i++) { String id = KafkaAdminClient.generateClientId(newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, "")); assertTrue("Got duplicate id " + id, !ids.contains(id)); ids.add(id); } assertEquals("myCustomId", KafkaAdminClient.generateClientId(newConfMap(AdminClientConfig.CLIENT_ID_CONFIG, "myCustomId"))); } |
### Question:
KafkaAdminClient extends AdminClient { @Override public DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options) { final long now = time.milliseconds(); final KafkaFutureImpl<Collection<AclBinding>> future = new KafkaFutureImpl<>(); runnable.call(new Call("describeAcls", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @Override AbstractRequest.Builder createRequest(int timeoutMs) { return new DescribeAclsRequest.Builder(filter); } @Override void handleResponse(AbstractResponse abstractResponse) { DescribeAclsResponse response = (DescribeAclsResponse) abstractResponse; if (response.error().isFailure()) { future.completeExceptionally(response.error().exception()); } else { future.complete(response.acls()); } } @Override void handleFailure(Throwable throwable) { future.completeExceptionally(throwable); } }, now); return new DescribeAclsResult(future); } private KafkaAdminClient(AdminClientConfig config, String clientId, Time time, Metadata metadata,
Metrics metrics, KafkaClient client, TimeoutProcessorFactory timeoutProcessorFactory); @Override void close(long duration, TimeUnit unit); @Override CreateTopicsResult createTopics(final Collection<NewTopic> newTopics,
final CreateTopicsOptions options); @Override DeleteTopicsResult deleteTopics(final Collection<String> topicNames,
DeleteTopicsOptions options); @Override ListTopicsResult listTopics(final ListTopicsOptions options); @Override DescribeTopicsResult describeTopics(final Collection<String> topicNames, DescribeTopicsOptions options); @Override DescribeClusterResult describeCluster(DescribeClusterOptions options); @Override DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAclsOptions options); @Override CreateAclsResult createAcls(Collection<AclBinding> acls, CreateAclsOptions options); @Override DeleteAclsResult deleteAcls(Collection<AclBindingFilter> filters, DeleteAclsOptions options); @Override DescribeConfigsResult describeConfigs(Collection<ConfigResource> configResources, final DescribeConfigsOptions options); @Override AlterConfigsResult alterConfigs(Map<ConfigResource, Config> configs, final AlterConfigsOptions options); }### Answer:
@Test public void testDescribeAcls() throws Exception { try (MockKafkaAdminClientEnv env = mockClientEnv()) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); env.kafkaClient().setNode(env.cluster().controller()); env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, ApiError.NONE, asList(ACL1, ACL2))); assertCollectionIs(env.adminClient().describeAcls(FILTER1).values().get(), ACL1, ACL2); env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, ApiError.NONE, Collections.<AclBinding>emptySet())); assertTrue(env.adminClient().describeAcls(FILTER2).values().get().isEmpty()); env.kafkaClient().prepareResponse(new DescribeAclsResponse(0, new ApiError(Errors.SECURITY_DISABLED, "Security is disabled"), Collections.<AclBinding>emptySet())); assertFutureError(env.adminClient().describeAcls(FILTER2).values(), SecurityDisabledException.class); } } |
### Question:
JmxReporter implements MetricsReporter { public void close() { synchronized (LOCK) { for (KafkaMbean mbean : this.mbeans.values()) unregister(mbean); } } JmxReporter(); JmxReporter(String prefix); @Override void configure(Map<String, ?> configs); @Override void init(List<KafkaMetric> metrics); @Override void metricChange(KafkaMetric metric); @Override void metricRemoval(KafkaMetric metric); void close(); }### Answer:
@Test public void testJmxRegistration() throws Exception { Metrics metrics = new Metrics(); try { metrics.addReporter(new JmxReporter()); Sensor sensor = metrics.sensor("kafka.requests"); sensor.add(metrics.metricName("pack.bean1.avg", "grp1"), new Avg()); sensor.add(metrics.metricName("pack.bean2.total", "grp2"), new Total()); Sensor sensor2 = metrics.sensor("kafka.blah"); sensor2.add(metrics.metricName("pack.bean1.some", "grp1"), new Total()); sensor2.add(metrics.metricName("pack.bean2.some", "grp1"), new Total()); } finally { metrics.close(); } } |
### Question:
Metrics implements Closeable { public MetricName metricName(String name, String group, String description, Map<String, String> tags) { Map<String, String> combinedTag = new LinkedHashMap<>(config.tags()); combinedTag.putAll(tags); return new MetricName(name, group, description, combinedTag); } Metrics(); Metrics(Time time); Metrics(MetricConfig defaultConfig, Time time); Metrics(MetricConfig defaultConfig); Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time); Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration); MetricName metricName(String name, String group, String description, Map<String, String> tags); MetricName metricName(String name, String group, String description); MetricName metricName(String name, String group); MetricName metricName(String name, String group, String description, String... keyValue); MetricName metricName(String name, String group, Map<String, String> tags); static String toHtmlTable(String domain, List<MetricNameTemplate> allMetrics); MetricConfig config(); Sensor getSensor(String name); Sensor sensor(String name); Sensor sensor(String name, Sensor.RecordingLevel recordingLevel); Sensor sensor(String name, Sensor... parents); Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents); void removeSensor(String name); void addMetric(MetricName metricName, Measurable measurable); synchronized void addMetric(MetricName metricName, MetricConfig config, Measurable measurable); synchronized KafkaMetric removeMetric(MetricName metricName); synchronized void addReporter(MetricsReporter reporter); Map<MetricName, KafkaMetric> metrics(); List<MetricsReporter> reporters(); KafkaMetric metric(MetricName metricName); MetricName metricInstance(MetricNameTemplate template, String... keyValue); MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags); @Override void close(); }### Answer:
@Test public void testMetricName() { MetricName n1 = metrics.metricName("name", "group", "description", "key1", "value1", "key2", "value2"); Map<String, String> tags = new HashMap<String, String>(); tags.put("key1", "value1"); tags.put("key2", "value2"); MetricName n2 = metrics.metricName("name", "group", "description", tags); assertEquals("metric names created in two different ways should be equal", n1, n2); try { metrics.metricName("name", "group", "description", "key1"); fail("Creating MetricName with an odd number of keyValue should fail"); } catch (IllegalArgumentException e) { } } |
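For illustration, a minimal sketch of how metricName merges tags: the config-level tags seed the LinkedHashMap and the per-call tags are put in afterwards, so a per-call tag wins on a key collision. The tag values here are made up, and the usual java.util and org.apache.kafka.common.metrics imports are assumed.

Metrics metrics = new Metrics(new MetricConfig().tags(Collections.singletonMap("env", "prod")));
Map<String, String> tags = new LinkedHashMap<>();
tags.put("env", "test");    // collides with the config-level "env" tag and wins
tags.put("client", "c1");
MetricName name = metrics.metricName("latency", "grp", "desc", tags);
// name.tags() is {env=test, client=c1}
metrics.close();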
### Question:
Metrics implements Closeable { public Sensor sensor(String name) { return this.sensor(name, Sensor.RecordingLevel.INFO); } Metrics(); Metrics(Time time); Metrics(MetricConfig defaultConfig, Time time); Metrics(MetricConfig defaultConfig); Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time); Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration); MetricName metricName(String name, String group, String description, Map<String, String> tags); MetricName metricName(String name, String group, String description); MetricName metricName(String name, String group); MetricName metricName(String name, String group, String description, String... keyValue); MetricName metricName(String name, String group, Map<String, String> tags); static String toHtmlTable(String domain, List<MetricNameTemplate> allMetrics); MetricConfig config(); Sensor getSensor(String name); Sensor sensor(String name); Sensor sensor(String name, Sensor.RecordingLevel recordingLevel); Sensor sensor(String name, Sensor... parents); Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents); void removeSensor(String name); void addMetric(MetricName metricName, Measurable measurable); synchronized void addMetric(MetricName metricName, MetricConfig config, Measurable measurable); synchronized KafkaMetric removeMetric(MetricName metricName); synchronized void addReporter(MetricsReporter reporter); Map<MetricName, KafkaMetric> metrics(); List<MetricsReporter> reporters(); KafkaMetric metric(MetricName metricName); MetricName metricInstance(MetricNameTemplate template, String... keyValue); MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags); @Override void close(); }### Answer:
@Test(expected = IllegalArgumentException.class) public void testBadSensorHierarchy() { Sensor p = metrics.sensor("parent"); Sensor c1 = metrics.sensor("child1", p); Sensor c2 = metrics.sensor("child2", p); metrics.sensor("gc", c1, c2); } |
### Question:
Metrics implements Closeable { public synchronized KafkaMetric removeMetric(MetricName metricName) { KafkaMetric metric = this.metrics.remove(metricName); if (metric != null) { for (MetricsReporter reporter : reporters) reporter.metricRemoval(metric); } return metric; } Metrics(); Metrics(Time time); Metrics(MetricConfig defaultConfig, Time time); Metrics(MetricConfig defaultConfig); Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time); Metrics(MetricConfig defaultConfig, List<MetricsReporter> reporters, Time time, boolean enableExpiration); MetricName metricName(String name, String group, String description, Map<String, String> tags); MetricName metricName(String name, String group, String description); MetricName metricName(String name, String group); MetricName metricName(String name, String group, String description, String... keyValue); MetricName metricName(String name, String group, Map<String, String> tags); static String toHtmlTable(String domain, List<MetricNameTemplate> allMetrics); MetricConfig config(); Sensor getSensor(String name); Sensor sensor(String name); Sensor sensor(String name, Sensor.RecordingLevel recordingLevel); Sensor sensor(String name, Sensor... parents); Sensor sensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor.RecordingLevel recordingLevel, Sensor... parents); synchronized Sensor sensor(String name, MetricConfig config, long inactiveSensorExpirationTimeSeconds, Sensor... parents); void removeSensor(String name); void addMetric(MetricName metricName, Measurable measurable); synchronized void addMetric(MetricName metricName, MetricConfig config, Measurable measurable); synchronized KafkaMetric removeMetric(MetricName metricName); synchronized void addReporter(MetricsReporter reporter); Map<MetricName, KafkaMetric> metrics(); List<MetricsReporter> reporters(); KafkaMetric metric(MetricName metricName); MetricName metricInstance(MetricNameTemplate template, String... keyValue); MetricName metricInstance(MetricNameTemplate template, Map<String, String> tags); @Override void close(); }### Answer:
@Test public void testRemoveMetric() { int size = metrics.metrics().size(); metrics.addMetric(metrics.metricName("test1", "grp1"), new Count()); metrics.addMetric(metrics.metricName("test2", "grp1"), new Count()); assertNotNull(metrics.removeMetric(metrics.metricName("test1", "grp1"))); assertNull(metrics.metrics().get(metrics.metricName("test1", "grp1"))); assertNotNull(metrics.metrics().get(metrics.metricName("test2", "grp1"))); assertNotNull(metrics.removeMetric(metrics.metricName("test2", "grp1"))); assertNull(metrics.metrics().get(metrics.metricName("test2", "grp1"))); assertEquals(size, metrics.metrics().size()); } |
### Question:
Sensor { public boolean shouldRecord() { return this.recordingLevel.shouldRecord(config.recordLevel().id); } Sensor(Metrics registry, String name, Sensor[] parents, MetricConfig config, Time time,
long inactiveSensorExpirationTimeSeconds, RecordingLevel recordingLevel); String name(); void record(); boolean shouldRecord(); void record(double value); void record(double value, long timeMs); void record(double value, long timeMs, boolean checkQuotas); void checkQuotas(); void checkQuotas(long timeMs); void add(CompoundStat stat); synchronized void add(CompoundStat stat, MetricConfig config); void add(MetricName metricName, MeasurableStat stat); synchronized void add(MetricName metricName, MeasurableStat stat, MetricConfig config); boolean hasExpired(); }### Answer:
@Test public void testRecordLevelEnum() { Sensor.RecordingLevel configLevel = Sensor.RecordingLevel.INFO; assertTrue(Sensor.RecordingLevel.INFO.shouldRecord(configLevel.id)); assertFalse(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id)); configLevel = Sensor.RecordingLevel.DEBUG; assertTrue(Sensor.RecordingLevel.INFO.shouldRecord(configLevel.id)); assertTrue(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id)); assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.DEBUG.toString()), Sensor.RecordingLevel.DEBUG); assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.INFO.toString()), Sensor.RecordingLevel.INFO); }
@Test public void testShouldRecord() { MetricConfig debugConfig = new MetricConfig().recordLevel(Sensor.RecordingLevel.DEBUG); MetricConfig infoConfig = new MetricConfig().recordLevel(Sensor.RecordingLevel.INFO); Sensor infoSensor = new Sensor(null, "infoSensor", null, debugConfig, new SystemTime(), 0, Sensor.RecordingLevel.INFO); assertTrue(infoSensor.shouldRecord()); infoSensor = new Sensor(null, "infoSensor", null, debugConfig, new SystemTime(), 0, Sensor.RecordingLevel.DEBUG); assertTrue(infoSensor.shouldRecord()); Sensor debugSensor = new Sensor(null, "debugSensor", null, infoConfig, new SystemTime(), 0, Sensor.RecordingLevel.INFO); assertTrue(debugSensor.shouldRecord()); debugSensor = new Sensor(null, "debugSensor", null, infoConfig, new SystemTime(), 0, Sensor.RecordingLevel.DEBUG); assertFalse(debugSensor.shouldRecord()); } |
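A hedged summary of the check under test: shouldRecord() passes the id of the level configured in MetricConfig to the sensor's own RecordingLevel, so an INFO sensor records under any configuration while a DEBUG sensor records only when the configured level is DEBUG. A sketch of the two interesting combinations, using only the enum exercised in the tests above:

boolean infoUnderInfo = Sensor.RecordingLevel.INFO.shouldRecord(Sensor.RecordingLevel.INFO.id);   // true
boolean debugUnderInfo = Sensor.RecordingLevel.DEBUG.shouldRecord(Sensor.RecordingLevel.INFO.id); // false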
### Question:
Histogram { public Histogram(BinScheme binScheme) { this.hist = new float[binScheme.bins()]; this.count = 0.0f; this.binScheme = binScheme; } Histogram(BinScheme binScheme); void record(double value); double value(double quantile); float[] counts(); void clear(); @Override String toString(); }### Answer:
@Test public void testHistogram() { BinScheme scheme = new ConstantBinScheme(12, -5, 5); Histogram hist = new Histogram(scheme); for (int i = -5; i < 5; i++) hist.record(i); for (int i = 0; i < 10; i++) assertEquals(scheme.fromBin(i + 1), hist.value(i / 10.0 + EPS), EPS); }
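For context, a minimal usage sketch of the Histogram above, assuming only the API shown (record values, then query by quantile):

Histogram hist = new Histogram(new ConstantBinScheme(12, -5, 5));
for (int i = -5; i < 5; i++)
    hist.record(i);
double median = hist.value(0.5);  // value at the 50th-percentile quantile
float[] counts = hist.counts();   // raw per-bin counts
hist.clear();                     // resets the bins and the running count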
### Question:
Selector implements Selectable, AutoCloseable { @Override public void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize) throws IOException { if (this.channels.containsKey(id)) throw new IllegalStateException("There is already a connection for id " + id); SocketChannel socketChannel = SocketChannel.open(); socketChannel.configureBlocking(false); Socket socket = socketChannel.socket(); socket.setKeepAlive(true); if (sendBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE) socket.setSendBufferSize(sendBufferSize); if (receiveBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE) socket.setReceiveBufferSize(receiveBufferSize); socket.setTcpNoDelay(true); boolean connected; try { connected = socketChannel.connect(address); } catch (UnresolvedAddressException e) { socketChannel.close(); throw new IOException("Can't resolve address: " + address, e); } catch (IOException e) { socketChannel.close(); throw e; } SelectionKey key = socketChannel.register(nioSelector, SelectionKey.OP_CONNECT); KafkaChannel channel; try { channel = channelBuilder.buildChannel(id, key, maxReceiveSize); } catch (Exception e) { try { socketChannel.close(); } finally { key.cancel(); } throw new IOException("Channel could not be created for socket " + socketChannel, e); } key.attach(channel); this.channels.put(id, channel); if (connected) { log.debug("Immediately connected to node {}", channel.id()); immediatelyConnectedKeys.add(key); key.interestOps(0); } } Selector(int maxReceiveSize,
long connectionMaxIdleMs,
Metrics metrics,
Time time,
String metricGrpPrefix, // prefix for the metric group
Map<String, String> metricTags, // tags used when creating MetricName instances
boolean metricsPerConnection,
boolean recordTimePerConnection,
ChannelBuilder channelBuilder); Selector(int maxReceiveSize,
long connectionMaxIdleMs,
Metrics metrics,
Time time,
String metricGrpPrefix,
Map<String, String> metricTags,
boolean metricsPerConnection,
ChannelBuilder channelBuilder); Selector(long connectionMaxIdleMS, Metrics metrics, Time time, String metricGrpPrefix, ChannelBuilder channelBuilder); @Override // creates a KafkaChannel and stores it in the channels map void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize); void register(String id, SocketChannel socketChannel); @Override void wakeup(); @Override void close(); void send(Send send); @Override // during poll, reads and writes according to the ready selection keys, invoking each KafkaChannel's read and write respectively void poll(long timeout); @Override List<Send> completedSends(); @Override List<NetworkReceive> completedReceives(); @Override Map<String, ChannelState> disconnected(); @Override List<String> connected(); @Override void mute(String id); @Override void unmute(String id); @Override void muteAll(); @Override void unmuteAll(); void close(String id); @Override boolean isChannelReady(String id); List<KafkaChannel> channels(); KafkaChannel channel(String id); KafkaChannel closingChannel(String id); Set<SelectionKey> keys(); static final long NO_IDLE_TIMEOUT_MS; }### Answer:
@Test(expected = IOException.class) public void testNoRouteToHost() throws Exception { selector.connect("0", new InetSocketAddress("some.invalid.hostname.foo.bar.local", server.port), BUFFER_SIZE, BUFFER_SIZE); }
@Test public void testLargeMessageSequence() throws Exception { int bufferSize = 512 * 1024; String node = "0"; int reqs = 50; InetSocketAddress addr = new InetSocketAddress("localhost", server.port); connect(node, addr); String requestPrefix = TestUtils.randomString(bufferSize); sendAndReceive(node, requestPrefix, 0, reqs); } |
### Question:
Selector implements Selectable, AutoCloseable { @Override public void mute(String id) { KafkaChannel channel = channelOrFail(id, true); mute(channel); } Selector(int maxReceiveSize,
long connectionMaxIdleMs,
Metrics metrics,
Time time,
String metricGrpPrefix, // prefix for the metric group
Map<String, String> metricTags, // tags used when creating MetricName instances
boolean metricsPerConnection,
boolean recordTimePerConnection,
ChannelBuilder channelBuilder); Selector(int maxReceiveSize,
long connectionMaxIdleMs,
Metrics metrics,
Time time,
String metricGrpPrefix,
Map<String, String> metricTags,
boolean metricsPerConnection,
ChannelBuilder channelBuilder); Selector(long connectionMaxIdleMS, Metrics metrics, Time time, String metricGrpPrefix, ChannelBuilder channelBuilder); @Override // creates a KafkaChannel and stores it in the channels map void connect(String id, InetSocketAddress address, int sendBufferSize, int receiveBufferSize); void register(String id, SocketChannel socketChannel); @Override void wakeup(); @Override void close(); void send(Send send); @Override // during poll, reads and writes according to the ready selection keys, invoking each KafkaChannel's read and write respectively void poll(long timeout); @Override List<Send> completedSends(); @Override List<NetworkReceive> completedReceives(); @Override Map<String, ChannelState> disconnected(); @Override List<String> connected(); @Override void mute(String id); @Override void unmute(String id); @Override void muteAll(); @Override void unmuteAll(); void close(String id); @Override boolean isChannelReady(String id); List<KafkaChannel> channels(); KafkaChannel channel(String id); KafkaChannel closingChannel(String id); Set<SelectionKey> keys(); static final long NO_IDLE_TIMEOUT_MS; }### Answer:
@Test public void testMute() throws Exception { blockingConnect("0"); blockingConnect("1"); selector.send(createSend("0", "hello")); selector.send(createSend("1", "hi")); selector.mute("1"); while (selector.completedReceives().isEmpty()) selector.poll(5); assertEquals("We should have only one response", 1, selector.completedReceives().size()); assertEquals("The response should not be from the muted node", "0", selector.completedReceives().get(0).source()); selector.unmute("1"); do { selector.poll(5); } while (selector.completedReceives().isEmpty()); assertEquals("We should have only one response", 1, selector.completedReceives().size()); assertEquals("The response should be from the previously muted node", "1", selector.completedReceives().get(0).source()); } |
### Question:
SslTransportLayer implements TransportLayer { protected void startHandshake() throws IOException { this.netReadBuffer = ByteBuffer.allocate(netReadBufferSize()); this.netWriteBuffer = ByteBuffer.allocate(netWriteBufferSize()); this.appReadBuffer = ByteBuffer.allocate(applicationBufferSize()); netWriteBuffer.position(0); netWriteBuffer.limit(0); netReadBuffer.position(0); netReadBuffer.limit(0); handshakeComplete = false; closing = false; sslEngine.beginHandshake(); handshakeStatus = sslEngine.getHandshakeStatus(); } SslTransportLayer(String channelId, SelectionKey key, SSLEngine sslEngine, boolean enableRenegotiation); static SslTransportLayer create(String channelId, SelectionKey key, SSLEngine sslEngine); @Override boolean ready(); @Override boolean finishConnect(); @Override void disconnect(); @Override SocketChannel socketChannel(); @Override boolean isOpen(); @Override boolean isConnected(); @Override void close(); @Override boolean hasPendingWrites(); @Override void handshake(); @Override int read(ByteBuffer dst); @Override long read(ByteBuffer[] dsts); @Override long read(ByteBuffer[] dsts, int offset, int length); @Override int write(ByteBuffer src); @Override long write(ByteBuffer[] srcs, int offset, int length); @Override long write(ByteBuffer[] srcs); Principal peerPrincipal(); SSLSession sslSession(); @Override void addInterestOps(int ops); @Override void removeInterestOps(int ops); @Override boolean isMute(); @Override long transferFrom(FileChannel fileChannel, long position, long count); }### Answer:
@Test public void testClientEndpointNotValidated() throws Exception { String node = "0"; clientCertStores = new CertStores(false, "non-existent.com"); serverCertStores = new CertStores(true, "localhost"); sslServerConfigs = serverCertStores.getTrustingConfig(clientCertStores); sslClientConfigs = clientCertStores.getTrustingConfig(serverCertStores); SslChannelBuilder serverChannelBuilder = new SslChannelBuilder(Mode.SERVER) { @Override protected SslTransportLayer buildTransportLayer(SslFactory sslFactory, String id, SelectionKey key, String host) throws IOException { SocketChannel socketChannel = (SocketChannel) key.channel(); SSLEngine sslEngine = sslFactory.createSslEngine(host, socketChannel.socket().getPort()); SSLParameters sslParams = sslEngine.getSSLParameters(); sslParams.setEndpointIdentificationAlgorithm("HTTPS"); sslEngine.setSSLParameters(sslParams); TestSslTransportLayer transportLayer = new TestSslTransportLayer(id, key, sslEngine, BUFFER_SIZE, BUFFER_SIZE, BUFFER_SIZE); transportLayer.startHandshake(); return transportLayer; } }; serverChannelBuilder.configure(sslServerConfigs); server = new NioEchoServer(ListenerName.forSecurityProtocol(SecurityProtocol.SSL), SecurityProtocol.SSL, new TestSecurityConfig(sslServerConfigs), "localhost", serverChannelBuilder); server.start(); createSelector(sslClientConfigs); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); selector.connect(node, addr, BUFFER_SIZE, BUFFER_SIZE); NetworkTestUtils.checkClientConnection(selector, node, 100, 10); } |
### Question:
AclBinding { @Override public boolean equals(Object o) { if (!(o instanceof AclBinding)) return false; AclBinding other = (AclBinding) o; return resource.equals(other.resource) && entry.equals(other.entry); } AclBinding(Resource resource, AccessControlEntry entry); boolean isUnknown(); Resource resource(); final AccessControlEntry entry(); AclBindingFilter toFilter(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); }### Answer:
@Test public void testMatching() throws Exception { assertTrue(ACL1.equals(ACL1)); final AclBinding acl1Copy = new AclBinding( new Resource(ResourceType.TOPIC, "mytopic"), new AccessControlEntry("User:ANONYMOUS", "", AclOperation.ALL, AclPermissionType.ALLOW)); assertTrue(ACL1.equals(acl1Copy)); assertTrue(acl1Copy.equals(ACL1)); assertTrue(ACL2.equals(ACL2)); assertFalse(ACL1.equals(ACL2)); assertFalse(ACL2.equals(ACL1)); assertTrue(AclBindingFilter.ANY.matches(ACL1)); assertFalse(AclBindingFilter.ANY.equals(ACL1)); assertTrue(AclBindingFilter.ANY.matches(ACL2)); assertFalse(AclBindingFilter.ANY.equals(ACL2)); assertTrue(AclBindingFilter.ANY.matches(ACL3)); assertFalse(AclBindingFilter.ANY.equals(ACL3)); assertTrue(AclBindingFilter.ANY.equals(AclBindingFilter.ANY)); assertTrue(ANY_ANONYMOUS.matches(ACL1)); assertFalse(ANY_ANONYMOUS.equals(ACL1)); assertFalse(ANY_ANONYMOUS.matches(ACL2)); assertFalse(ANY_ANONYMOUS.equals(ACL2)); assertTrue(ANY_ANONYMOUS.matches(ACL3)); assertFalse(ANY_ANONYMOUS.equals(ACL3)); assertFalse(ANY_DENY.matches(ACL1)); assertFalse(ANY_DENY.matches(ACL2)); assertTrue(ANY_DENY.matches(ACL3)); assertTrue(ANY_MYTOPIC.matches(ACL1)); assertTrue(ANY_MYTOPIC.matches(ACL2)); assertFalse(ANY_MYTOPIC.matches(ACL3)); assertTrue(ANY_ANONYMOUS.matches(UNKNOWN_ACL)); assertTrue(ANY_DENY.matches(UNKNOWN_ACL)); assertTrue(UNKNOWN_ACL.equals(UNKNOWN_ACL)); assertFalse(ANY_MYTOPIC.matches(UNKNOWN_ACL)); } |
### Question:
AclBinding { public boolean isUnknown() { return resource.isUnknown() || entry.isUnknown(); } AclBinding(Resource resource, AccessControlEntry entry); boolean isUnknown(); Resource resource(); final AccessControlEntry entry(); AclBindingFilter toFilter(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); }### Answer:
@Test public void testUnknowns() throws Exception { assertFalse(ACL1.isUnknown()); assertFalse(ACL2.isUnknown()); assertFalse(ACL3.isUnknown()); assertFalse(ANY_ANONYMOUS.isUnknown()); assertFalse(ANY_DENY.isUnknown()); assertFalse(ANY_MYTOPIC.isUnknown()); assertTrue(UNKNOWN_ACL.isUnknown()); } |
### Question:
AclBinding { public AclBindingFilter toFilter() { return new AclBindingFilter(resource.toFilter(), entry.toFilter()); } AclBinding(Resource resource, AccessControlEntry entry); boolean isUnknown(); Resource resource(); final AccessControlEntry entry(); AclBindingFilter toFilter(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); }### Answer:
@Test public void testMatchesAtMostOne() throws Exception { assertEquals(null, ACL1.toFilter().findIndefiniteField()); assertEquals(null, ACL2.toFilter().findIndefiniteField()); assertEquals(null, ACL3.toFilter().findIndefiniteField()); assertFalse(ANY_ANONYMOUS.matchesAtMostOne()); assertFalse(ANY_DENY.matchesAtMostOne()); assertFalse(ANY_MYTOPIC.matchesAtMostOne()); } |
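A hedged sketch of the contract these tests suggest: a binding's own filter is fully concrete, so it matches the binding and carries no indefinite field.

AclBinding acl = new AclBinding(
    new Resource(ResourceType.TOPIC, "mytopic"),
    new AccessControlEntry("User:ANONYMOUS", "", AclOperation.ALL, AclPermissionType.ALLOW));
AclBindingFilter filter = acl.toFilter();
// filter.matches(acl) is true and filter.findIndefiniteField() is null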
### Question:
JaasContext { public static JaasContext load(JaasContext.Type contextType, ListenerName listenerName, Map<String, ?> configs) { String listenerContextName; String globalContextName; switch (contextType) { case CLIENT: if (listenerName != null) throw new IllegalArgumentException("listenerName should be null for CLIENT"); globalContextName = GLOBAL_CONTEXT_NAME_CLIENT; listenerContextName = null; break; case SERVER: if (listenerName == null) throw new IllegalArgumentException("listenerName should not be null for SERVER"); globalContextName = GLOBAL_CONTEXT_NAME_SERVER; listenerContextName = listenerName.value().toLowerCase(Locale.ROOT) + "." + GLOBAL_CONTEXT_NAME_SERVER; break; default: throw new IllegalArgumentException("Unexpected context type " + contextType); } return load(contextType, listenerContextName, globalContextName, configs); } JaasContext(String name, Type type, Configuration configuration); static JaasContext load(JaasContext.Type contextType, ListenerName listenerName,
Map<String, ?> configs); String name(); Type type(); Configuration configuration(); List<AppConfigurationEntry> configurationEntries(); String configEntryOption(String key, String loginModuleName); }### Answer:
@Test(expected = IllegalArgumentException.class) public void testLoadForServerWithWrongListenerName() throws IOException { writeConfiguration("Server", "test.LoginModule required;"); JaasContext.load(JaasContext.Type.SERVER, new ListenerName("plaintext"), Collections.<String, Object>emptyMap()); }
@Test(expected = IllegalArgumentException.class) public void testLoadForClientWithListenerName() { JaasContext.load(JaasContext.Type.CLIENT, new ListenerName("foo"), Collections.<String, Object>emptyMap()); } |
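A hedged sketch of the name resolution implied by the switch in load(), assuming GLOBAL_CONTEXT_NAME_SERVER is "KafkaServer" and GLOBAL_CONTEXT_NAME_CLIENT is "KafkaClient" (as the constant names suggest) and that a matching JAAS configuration is installed:

// SERVER + listener "SASL_SSL": looks for a listener-scoped "sasl_ssl.KafkaServer"
// section first, then falls back to the global "KafkaServer" section.
JaasContext server = JaasContext.load(JaasContext.Type.SERVER,
    new ListenerName("SASL_SSL"), Collections.<String, Object>emptyMap());
// CLIENT must pass a null listener name and resolves the global "KafkaClient" section.
JaasContext client = JaasContext.load(JaasContext.Type.CLIENT,
    null, Collections.<String, Object>emptyMap());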
### Question:
SslFactory implements Configurable { @Override public void configure(Map<String, ?> configs) throws KafkaException { this.protocol = (String) configs.get(SslConfigs.SSL_PROTOCOL_CONFIG); this.provider = (String) configs.get(SslConfigs.SSL_PROVIDER_CONFIG); @SuppressWarnings("unchecked") List<String> cipherSuitesList = (List<String>) configs.get(SslConfigs.SSL_CIPHER_SUITES_CONFIG); if (cipherSuitesList != null) this.cipherSuites = cipherSuitesList.toArray(new String[cipherSuitesList.size()]); @SuppressWarnings("unchecked") List<String> enabledProtocolsList = (List<String>) configs.get(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); if (enabledProtocolsList != null) this.enabledProtocols = enabledProtocolsList.toArray(new String[enabledProtocolsList.size()]); String endpointIdentification = (String) configs.get(SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG); if (endpointIdentification != null) this.endpointIdentification = endpointIdentification; String secureRandomImplementation = (String) configs.get(SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG); if (secureRandomImplementation != null) { try { this.secureRandomImplementation = SecureRandom.getInstance(secureRandomImplementation); } catch (GeneralSecurityException e) { throw new KafkaException(e); } } String clientAuthConfig = clientAuthConfigOverride; if (clientAuthConfig == null) clientAuthConfig = (String) configs.get(SslConfigs.SSL_CLIENT_AUTH_CONFIG); if (clientAuthConfig != null) { if (clientAuthConfig.equals("required")) this.needClientAuth = true; else if (clientAuthConfig.equals("requested")) this.wantClientAuth = true; } this.kmfAlgorithm = (String) configs.get(SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG); this.tmfAlgorithm = (String) configs.get(SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG); createKeystore((String) configs.get(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG), (String) configs.get(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG), (Password) configs.get(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG), (Password) configs.get(SslConfigs.SSL_KEY_PASSWORD_CONFIG)); createTruststore((String) configs.get(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG), (String) configs.get(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG), (Password) configs.get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG)); try { this.sslContext = createSSLContext(); } catch (Exception e) { throw new KafkaException(e); } } SslFactory(Mode mode); SslFactory(Mode mode, String clientAuthConfigOverride); @Override void configure(Map<String, ?> configs); SSLEngine createSslEngine(String peerHost, int peerPort); SSLContext sslContext(); }### Answer:
@Test public void testSslFactoryWithoutPasswordConfiguration() throws Exception { File trustStoreFile = File.createTempFile("truststore", ".jks"); Map<String, Object> serverSslConfig = TestSslUtils.createSslConfig(false, true, Mode.SERVER, trustStoreFile, "server"); serverSslConfig.remove(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG); SslFactory sslFactory = new SslFactory(Mode.SERVER); try { sslFactory.configure(serverSslConfig); } catch (Exception e) { fail("An exception was thrown when configuring the truststore without a password: " + e); } } |
### Question:
ScramCredentialUtils { public static String credentialToString(ScramCredential credential) { return String.format("%s=%s,%s=%s,%s=%s,%s=%d", SALT, DatatypeConverter.printBase64Binary(credential.salt()), STORED_KEY, DatatypeConverter.printBase64Binary(credential.storedKey()), SERVER_KEY, DatatypeConverter.printBase64Binary(credential.serverKey()), ITERATIONS, credential.iterations()); } static String credentialToString(ScramCredential credential); static ScramCredential credentialFromString(String str); static void createCache(CredentialCache cache, Collection<String> enabledMechanisms); }### Answer:
@Test public void generateCredential() { ScramCredential credential1 = formatter.generateCredential("password", 4096); ScramCredential credential2 = formatter.generateCredential("password", 4096); assertNotEquals(ScramCredentialUtils.credentialToString(credential1), ScramCredentialUtils.credentialToString(credential2)); } |
### Question:
ScramCredentialUtils { public static ScramCredential credentialFromString(String str) { Properties props = toProps(str); if (props.size() != 4 || !props.containsKey(SALT) || !props.containsKey(STORED_KEY) || !props.containsKey(SERVER_KEY) || !props.containsKey(ITERATIONS)) { throw new IllegalArgumentException("Credentials not valid: " + str); } byte[] salt = DatatypeConverter.parseBase64Binary(props.getProperty(SALT)); byte[] storedKey = DatatypeConverter.parseBase64Binary(props.getProperty(STORED_KEY)); byte[] serverKey = DatatypeConverter.parseBase64Binary(props.getProperty(SERVER_KEY)); int iterations = Integer.parseInt(props.getProperty(ITERATIONS)); return new ScramCredential(salt, storedKey, serverKey, iterations); } static String credentialToString(ScramCredential credential); static ScramCredential credentialFromString(String str); static void createCache(CredentialCache cache, Collection<String> enabledMechanisms); }### Answer:
@Test(expected = IllegalArgumentException.class) public void invalidCredential() { ScramCredentialUtils.credentialFromString("abc"); } |
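A round-trip sketch tying the two methods together; exception handling is elided, and the serialized field names are assumed to be the literals behind the SALT/STORED_KEY/SERVER_KEY/ITERATIONS constants:

ScramFormatter formatter = new ScramFormatter(ScramMechanism.SCRAM_SHA_256);
ScramCredential original = formatter.generateCredential("password", 4096);
String serialized = ScramCredentialUtils.credentialToString(original);
// serialized has the shape "salt=<base64>,stored_key=<base64>,server_key=<base64>,iterations=4096"
ScramCredential parsed = ScramCredentialUtils.credentialFromString(serialized);
// parsed carries the same salt, keys and iteration count as original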
### Question:
ScramCredentialUtils { public static void createCache(CredentialCache cache, Collection<String> enabledMechanisms) { for (String mechanism : ScramMechanism.mechanismNames()) { if (enabledMechanisms.contains(mechanism)) cache.createCache(mechanism, ScramCredential.class); } } static String credentialToString(ScramCredential credential); static ScramCredential credentialFromString(String str); static void createCache(CredentialCache cache, Collection<String> enabledMechanisms); }### Answer:
@Test public void scramCredentialCache() throws Exception { CredentialCache cache = new CredentialCache(); ScramCredentialUtils.createCache(cache, Arrays.asList("SCRAM-SHA-512", "PLAIN")); assertNotNull("Cache not created for enabled mechanism", cache.cache(ScramMechanism.SCRAM_SHA_512.mechanismName(), ScramCredential.class)); assertNull("Cache created for disabled mechanism", cache.cache(ScramMechanism.SCRAM_SHA_256.mechanismName(), ScramCredential.class)); CredentialCache.Cache<ScramCredential> sha512Cache = cache.cache(ScramMechanism.SCRAM_SHA_512.mechanismName(), ScramCredential.class); ScramFormatter formatter = new ScramFormatter(ScramMechanism.SCRAM_SHA_512); ScramCredential credentialA = formatter.generateCredential("password", 4096); sha512Cache.put("userA", credentialA); assertEquals(credentialA, sha512Cache.get("userA")); assertNull("Invalid user credential", sha512Cache.get("userB")); } |
### Question:
KafkaPrincipal implements Principal { @Override public int hashCode() { int result = principalType.hashCode(); result = 31 * result + name.hashCode(); return result; } KafkaPrincipal(String principalType, String name); static KafkaPrincipal fromString(String str); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); @Override String getName(); String getPrincipalType(); static final String SEPARATOR; static final String USER_TYPE; final static KafkaPrincipal ANONYMOUS; }### Answer:
@Test public void testEqualsAndHashCode() { String name = "KafkaUser"; KafkaPrincipal principal1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, name); KafkaPrincipal principal2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, name); Assert.assertEquals(principal1.hashCode(), principal2.hashCode()); Assert.assertEquals(principal1, principal2); } |
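For illustration, the fromString/toString round-trip the API above supports; sketch only, assuming SEPARATOR is the colon seen in names like "User:ANONYMOUS":

KafkaPrincipal principal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "alice");
String str = principal.toString();               // "User:alice"
KafkaPrincipal parsed = KafkaPrincipal.fromString(str);
// parsed.equals(principal) holds, consistent with the equals/hashCode test above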
### Question:
ConfigDef { public Set<String> names() { return Collections.unmodifiableSet(configKeys.keySet()); } ConfigDef(); ConfigDef(ConfigDef base); Set<String> names(); ConfigDef define(ConfigKey key); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation,
String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation,
String group, int orderInGroup, Width width, String displayName, List<String> dependents); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation,
String group, int orderInGroup, Width width, String displayName, Recommender recommender); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation,
String group, int orderInGroup, Width width, String displayName); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation,
String group, int orderInGroup, Width width, String displayName, List<String> dependents, Recommender recommender); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation,
String group, int orderInGroup, Width width, String displayName, List<String> dependents); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation,
String group, int orderInGroup, Width width, String displayName, Recommender recommender); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation,
String group, int orderInGroup, Width width, String displayName); ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup,
Width width, String displayName, List<String> dependents, Recommender recommender); ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup,
Width width, String displayName, List<String> dependents); ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup,
Width width, String displayName, Recommender recommender); ConfigDef define(String name, Type type, Importance importance, String documentation, String group, int orderInGroup,
Width width, String displayName); ConfigDef define(String name, Type type, Object defaultValue, Validator validator, Importance importance, String documentation); ConfigDef define(String name, Type type, Object defaultValue, Importance importance, String documentation); ConfigDef define(String name, Type type, Importance importance, String documentation); ConfigDef defineInternal(final String name, final Type type, final Object defaultValue, final Importance importance); Map<String, ConfigKey> configKeys(); List<String> groups(); ConfigDef withClientSslSupport(); ConfigDef withClientSaslSupport(); Map<String, Object> parse(Map<?, ?> props); List<ConfigValue> validate(Map<String, String> props); Map<String, ConfigValue> validateAll(Map<String, String> props); static Object parseType(String name, Object value, Type type); static String convertToString(Object parsedValue, Type type); String toHtmlTable(); String toRst(); String toEnrichedRst(); void embed(final String keyPrefix, final String groupPrefix, final int startingOrd, final ConfigDef child); static final Object NO_DEFAULT_VALUE; }### Answer:
@Test public void testNames() { final ConfigDef configDef = new ConfigDef() .define("a", Type.STRING, Importance.LOW, "docs") .define("b", Type.STRING, Importance.LOW, "docs"); Set<String> names = configDef.names(); assertEquals(new HashSet<>(Arrays.asList("a", "b")), names); try { names.add("new"); fail(); } catch (UnsupportedOperationException e) { } } |
### Question:
AbstractConfig { public Map<String, Object> originalsWithPrefix(String prefix) { Map<String, Object> result = new RecordingMap<>(prefix, false); for (Map.Entry<String, ?> entry : originals.entrySet()) { if (entry.getKey().startsWith(prefix) && entry.getKey().length() > prefix.length()) result.put(entry.getKey().substring(prefix.length()), entry.getValue()); } return result; } @SuppressWarnings("unchecked") AbstractConfig(ConfigDef definition, Map<?, ?> originals, boolean doLog); AbstractConfig(ConfigDef definition, Map<?, ?> originals); void ignore(String key); Short getShort(String key); Integer getInt(String key); Long getLong(String key); Double getDouble(String key); @SuppressWarnings("unchecked") List<String> getList(String key); Boolean getBoolean(String key); String getString(String key); ConfigDef.Type typeOf(String key); Password getPassword(String key); Class<?> getClass(String key); Set<String> unused(); Map<String, Object> originals(); Map<String, String> originalsStrings(); Map<String, Object> originalsWithPrefix(String prefix); Map<String, Object> valuesWithPrefixOverride(String prefix); Map<String, ?> values(); void logUnused(); T getConfiguredInstance(String key, Class<T> t); List<T> getConfiguredInstances(String key, Class<T> t); List<T> getConfiguredInstances(String key, Class<T> t, Map<String, Object> configOverrides); @Override boolean equals(Object o); @Override int hashCode(); }### Answer:
@Test public void testOriginalsWithPrefix() { Properties props = new Properties(); props.put("foo.bar", "abc"); props.put("setting", "def"); TestConfig config = new TestConfig(props); Map<String, Object> originalsWithPrefix = config.originalsWithPrefix("foo."); assertTrue(config.unused().contains("foo.bar")); originalsWithPrefix.get("bar"); assertFalse(config.unused().contains("foo.bar")); Map<String, Object> expected = new HashMap<>(); expected.put("bar", "abc"); assertEquals(expected, originalsWithPrefix); } |
### Question:
AbstractConfig { public Set<String> unused() { Set<String> keys = new HashSet<>(originals.keySet()); keys.removeAll(used); return keys; } @SuppressWarnings("unchecked") AbstractConfig(ConfigDef definition, Map<?, ?> originals, boolean doLog); AbstractConfig(ConfigDef definition, Map<?, ?> originals); void ignore(String key); Short getShort(String key); Integer getInt(String key); Long getLong(String key); Double getDouble(String key); @SuppressWarnings("unchecked") List<String> getList(String key); Boolean getBoolean(String key); String getString(String key); ConfigDef.Type typeOf(String key); Password getPassword(String key); Class<?> getClass(String key); Set<String> unused(); Map<String, Object> originals(); Map<String, String> originalsStrings(); Map<String, Object> originalsWithPrefix(String prefix); Map<String, Object> valuesWithPrefixOverride(String prefix); Map<String, ?> values(); void logUnused(); T getConfiguredInstance(String key, Class<T> t); List<T> getConfiguredInstances(String key, Class<T> t); List<T> getConfiguredInstances(String key, Class<T> t, Map<String, Object> configOverrides); @Override boolean equals(Object o); @Override int hashCode(); }### Answer:
@Test public void testUnused() { Properties props = new Properties(); String configValue = "org.apache.kafka.common.config.AbstractConfigTest$ConfiguredFakeMetricsReporter"; props.put(TestConfig.METRIC_REPORTER_CLASSES_CONFIG, configValue); props.put(FakeMetricsReporterConfig.EXTRA_CONFIG, "my_value"); TestConfig config = new TestConfig(props); assertTrue("metric.extra_config should be marked unused before getConfiguredInstances is called", config.unused().contains(FakeMetricsReporterConfig.EXTRA_CONFIG)); config.getConfiguredInstances(TestConfig.METRIC_REPORTER_CLASSES_CONFIG, MetricsReporter.class); assertTrue("All defined configurations should be marked as used", config.unused().isEmpty()); } |
### Question:
Cluster { public static Cluster bootstrap(List<InetSocketAddress> addresses) { List<Node> nodes = new ArrayList<>(); int nodeId = -1; for (InetSocketAddress address : addresses) nodes.add(new Node(nodeId--, address.getHostString(), address.getPort())); return new Cluster(null, true, nodes, new ArrayList<PartitionInfo>(0), Collections.<String>emptySet(), Collections.<String>emptySet(), null); } Cluster(String clusterId,
Collection<Node> nodes,
Collection<PartitionInfo> partitions,
Set<String> unauthorizedTopics,
Set<String> internalTopics); Cluster(String clusterId,
Collection<Node> nodes,
Collection<PartitionInfo> partitions,
Set<String> unauthorizedTopics,
Set<String> internalTopics,
Node controller); private Cluster(String clusterId,
boolean isBootstrapConfigured,
Collection<Node> nodes,
Collection<PartitionInfo> partitions,
Set<String> unauthorizedTopics,
Set<String> internalTopics,
Node controller); static Cluster empty(); static Cluster bootstrap(List<InetSocketAddress> addresses); Cluster withPartitions(Map<TopicPartition, PartitionInfo> partitions); List<Node> nodes(); Node nodeById(int id); Node leaderFor(TopicPartition topicPartition); PartitionInfo partition(TopicPartition topicPartition); List<PartitionInfo> partitionsForTopic(String topic); Integer partitionCountForTopic(String topic); List<PartitionInfo> availablePartitionsForTopic(String topic); List<PartitionInfo> partitionsForNode(int nodeId); Set<String> topics(); Set<String> unauthorizedTopics(); Set<String> internalTopics(); boolean isBootstrapConfigured(); ClusterResource clusterResource(); Node controller(); @Override String toString(); }### Answer:
@Test public void testBootstrap() { String ipAddress = "140.211.11.105"; String hostName = "www.example.com"; Cluster cluster = Cluster.bootstrap(Arrays.asList( new InetSocketAddress(ipAddress, 9002), new InetSocketAddress(hostName, 9002) )); Set<String> expectedHosts = Utils.mkSet(ipAddress, hostName); Set<String> actualHosts = new HashSet<>(); for (Node node : cluster.nodes()) actualHosts.add(node.host()); assertEquals(expectedHosts, actualHosts); } |
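Worth noting from the method body: bootstrap nodes get synthetic negative ids (-1, -2, ...) via nodeId--, so they can never collide with real broker ids. A minimal sketch; the host names are made up:

Cluster cluster = Cluster.bootstrap(Arrays.asList(
    new InetSocketAddress("broker1.example.com", 9092),
    new InetSocketAddress("broker2.example.com", 9092)));
// cluster.isBootstrapConfigured() is true and the node ids are -1 and -2
for (Node node : cluster.nodes())
    System.out.println(node.id() + " -> " + node.host());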
### Question:
PartitionInfo { @Override public String toString() { return String.format("Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s)", topic, partition, leader == null ? "none" : leader.idString(), formatNodeIds(replicas), formatNodeIds(inSyncReplicas)); } PartitionInfo(String topic, int partition, Node leader, Node[] replicas, Node[] inSyncReplicas); String topic(); int partition(); Node leader(); Node[] replicas(); Node[] inSyncReplicas(); @Override String toString(); }### Answer:
@Test public void testToString() { String topic = "sample"; int partition = 0; Node leader = new Node(0, "localhost", 9092); Node r1 = new Node(1, "localhost", 9093); Node r2 = new Node(2, "localhost", 9094); Node[] replicas = new Node[] {leader, r1, r2}; Node[] inSyncReplicas = new Node[] {leader, r1, r2}; PartitionInfo partitionInfo = new PartitionInfo(topic, partition, leader, replicas, inSyncReplicas); String expected = String.format("Partition(topic = %s, partition = %d, leader = %s, replicas = %s, isr = %s)", topic, partition, leader.idString(), "[0,1,2]", "[0,1,2]"); Assert.assertEquals(expected, partitionInfo.toString()); } |
### Question:
RecordHeaders implements Headers { @Override public Headers add(Header header) throws IllegalStateException { canWrite(); headers.add(header); return this; } RecordHeaders(); RecordHeaders(Header[] headers); RecordHeaders(Iterable<Header> headers); @Override Headers add(Header header); @Override Headers add(String key, byte[] value); @Override Headers remove(String key); @Override Header lastHeader(String key); @Override Iterable<Header> headers(final String key); @Override Iterator<Header> iterator(); void setReadOnly(); Header[] toArray(); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); }### Answer:
@Test public void testAdd() { Headers headers = new RecordHeaders(); headers.add(new RecordHeader("key", "value".getBytes())); Header header = headers.iterator().next(); assertHeader("key", "value", header); headers.add(new RecordHeader("key2", "value2".getBytes())); assertHeader("key2", "value2", headers.lastHeader("key2")); assertEquals(2, getCount(headers)); } |
### Question:
RecordHeaders implements Headers { @Override public Iterable<Header> headers(final String key) { checkKey(key); return new Iterable<Header>() { @Override public Iterator<Header> iterator() { return new FilterByKeyIterator(headers.iterator(), key); } }; } RecordHeaders(); RecordHeaders(Header[] headers); RecordHeaders(Iterable<Header> headers); @Override Headers add(Header header); @Override Headers add(String key, byte[] value); @Override Headers remove(String key); @Override Header lastHeader(String key); @Override Iterable<Header> headers(final String key); @Override Iterator<Header> iterator(); void setReadOnly(); Header[] toArray(); @Override boolean equals(Object o); @Override int hashCode(); @Override String toString(); }### Answer:
@Test public void testHeaders() throws IOException { RecordHeaders headers = new RecordHeaders(); headers.add(new RecordHeader("key", "value".getBytes())); headers.add(new RecordHeader("key1", "key1value".getBytes())); headers.add(new RecordHeader("key", "value2".getBytes())); headers.add(new RecordHeader("key2", "key2value".getBytes())); Iterator<Header> keyHeaders = headers.headers("key").iterator(); assertHeader("key", "value", keyHeaders.next()); assertHeader("key", "value2", keyHeaders.next()); assertFalse(keyHeaders.hasNext()); keyHeaders = headers.headers("key1").iterator(); assertHeader("key1", "key1value", keyHeaders.next()); assertFalse(keyHeaders.hasNext()); keyHeaders = headers.headers("key2").iterator(); assertHeader("key2", "key2value", keyHeaders.next()); assertFalse(keyHeaders.hasNext()); } |
### Question:
Topic { static boolean containsValidPattern(String topic) { return LEGAL_CHARS_PATTERN.matcher(topic).matches(); } static void validate(String topic); static boolean isInternal(String topic); static boolean hasCollisionChars(String topic); static boolean hasCollision(String topicA, String topicB); static final String GROUP_METADATA_TOPIC_NAME; static final String TRANSACTION_STATE_TOPIC_NAME; static final String LEGAL_CHARS; }### Answer:
@Test public void shouldRecognizeInvalidCharactersInTopicNames() { char[] invalidChars = {'/', '\\', ',', '\u0000', ':', '"', '\'', ';', '*', '?', ' ', '\t', '\r', '\n', '='}; for (char c : invalidChars) { String topicName = "Is " + c + "illegal"; assertFalse(Topic.containsValidPattern(topicName)); } } |
### Question:
Java { public static boolean isIBMJdk() { return System.getProperty("java.vendor").contains("IBM"); } private Java(); static boolean isIBMJdk(); static final String JVM_SPEC_VERSION; static final boolean IS_JAVA9_COMPATIBLE; }### Answer:
@Test public void testIsIBMJdk() { System.setProperty("java.vendor", "Oracle Corporation"); assertFalse(Java.isIBMJdk()); System.setProperty("java.vendor", "IBM Corporation"); assertTrue(Java.isIBMJdk()); }
@Test public void testLoadKerberosLoginModule() throws ClassNotFoundException { String clazz = Java.isIBMJdk() ? "com.ibm.security.auth.module.Krb5LoginModule" : "com.sun.security.auth.module.Krb5LoginModule"; Class.forName(clazz); } |
### Question:
Crc32C { public static Checksum create() { return CHECKSUM_FACTORY.create(); } static long compute(byte[] bytes, int offset, int size); static long compute(ByteBuffer buffer, int offset, int size); static Checksum create(); }### Answer:
@Test public void testUpdate() { final byte[] bytes = "Any String you want".getBytes(); final int len = bytes.length; Checksum crc1 = Crc32C.create(); Checksum crc2 = Crc32C.create(); Checksum crc3 = Crc32C.create(); crc1.update(bytes, 0, len); for (int i = 0; i < len; i++) crc2.update(bytes[i]); crc3.update(bytes, 0, len / 2); crc3.update(bytes, len / 2, len - len / 2); assertEquals("Crc values should be the same", crc1.getValue(), crc2.getValue()); assertEquals("Crc values should be the same", crc1.getValue(), crc3.getValue()); } |
### Question:
Crc32C { public static long compute(byte[] bytes, int offset, int size) { Checksum crc = create(); crc.update(bytes, offset, size); return crc.getValue(); } static long compute(byte[] bytes, int offset, int size); static long compute(ByteBuffer buffer, int offset, int size); static Checksum create(); }### Answer:
@Test public void testValue() { final byte[] bytes = "Some String".getBytes(); assertEquals(608512271, Crc32C.compute(bytes, 0, bytes.length)); } |
### Question:
Crc32 implements Checksum { @Override public void update(byte[] b, int off, int len) { if (off < 0 || len < 0 || off > b.length - len) throw new ArrayIndexOutOfBoundsException(); int localCrc = crc; while (len > 7) { final int c0 = (b[off + 0] ^ localCrc) & 0xff; final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); final int c4 = b[off + 4] & 0xff; final int c5 = b[off + 5] & 0xff; final int c6 = b[off + 6] & 0xff; final int c7 = b[off + 7] & 0xff; localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); off += 8; len -= 8; } switch (len) { case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; default: } crc = localCrc; } Crc32(); static long crc32(byte[] bytes); static long crc32(byte[] bytes, int offset, int size); static long crc32(ByteBuffer buffer, int offset, int size); @Override long getValue(); @Override void reset(); @Override void update(byte[] b, int off, int len); @Override final void update(int b); }### Answer:
@Test public void testUpdate() { final byte[] bytes = "Any String you want".getBytes(); final int len = bytes.length; Checksum crc1 = new Crc32(); Checksum crc2 = new Crc32(); Checksum crc3 = new Crc32(); crc1.update(bytes, 0, len); for (int i = 0; i < len; i++) crc2.update(bytes[i]); crc3.update(bytes, 0, len / 2); crc3.update(bytes, len / 2, len - len / 2); assertEquals("Crc values should be the same", crc1.getValue(), crc2.getValue()); assertEquals("Crc values should be the same", crc1.getValue(), crc3.getValue()); }
### Question:
Crc32 implements Checksum { public static long crc32(byte[] bytes) { return crc32(bytes, 0, bytes.length); } Crc32(); static long crc32(byte[] bytes); static long crc32(byte[] bytes, int offset, int size); static long crc32(ByteBuffer buffer, int offset, int size); @Override long getValue(); @Override void reset(); @Override void update(byte[] b, int off, int len); @Override final void update(int b); }### Answer:
@Test public void testValue() { final byte[] bytes = "Some String".getBytes(); assertEquals(2021503672, Crc32.crc32(bytes)); } |
### Question:
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }### Answer:
@Test public void testReadUnsignedIntLEFromArray() { byte[] array1 = {0x01, 0x02, 0x03, 0x04, 0x05}; assertEquals(0x04030201, ByteUtils.readUnsignedIntLE(array1, 0)); assertEquals(0x05040302, ByteUtils.readUnsignedIntLE(array1, 1)); byte[] array2 = {(byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4, (byte) 0xf5, (byte) 0xf6}; assertEquals(0xf4f3f2f1, ByteUtils.readUnsignedIntLE(array2, 0)); assertEquals(0xf6f5f4f3, ByteUtils.readUnsignedIntLE(array2, 2)); }
@Test public void testReadUnsignedIntLEFromInputStream() throws IOException { byte[] array1 = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09}; ByteArrayInputStream is1 = new ByteArrayInputStream(array1); assertEquals(0x04030201, ByteUtils.readUnsignedIntLE(is1)); assertEquals(0x08070605, ByteUtils.readUnsignedIntLE(is1)); byte[] array2 = {(byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4, (byte) 0xf5, (byte) 0xf6, (byte) 0xf7, (byte) 0xf8}; ByteArrayInputStream is2 = new ByteArrayInputStream(array2); assertEquals(0xf4f3f2f1, ByteUtils.readUnsignedIntLE(is2)); assertEquals(0xf8f7f6f5, ByteUtils.readUnsignedIntLE(is2)); } |
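The little-endian assembly in readUnsignedIntLE relies on | evaluating its operands left to right, with byte 0 read as the least significant. A minimal sketch of the byte layout, independent of ByteUtils:

// byte 0 is least significant, byte 3 most significant
byte[] b = {0x01, 0x02, 0x03, 0x04};
int value = (b[0] & 0xff)
          | (b[1] & 0xff) << 8
          | (b[2] & 0xff) << 16
          | (b[3] & 0xff) << 24;
assert value == 0x04030201;  // matches the first assertion in the array test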
### Question:
ByteUtils { public static long readUnsignedInt(ByteBuffer buffer) { return buffer.getInt() & 0xffffffffL; } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }### Answer:
@Test public void testReadUnsignedInt() { ByteBuffer buffer = ByteBuffer.allocate(4); long writeValue = 133444; ByteUtils.writeUnsignedInt(buffer, writeValue); buffer.flip(); long readValue = ByteUtils.readUnsignedInt(buffer); assertEquals(writeValue, readValue); } |
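The & 0xffffffffL mask is what makes the read unsigned: it widens the signed int to a long and discards the sign extension. A one-line sketch:

int raw = 0xFFFFFFFF;               // -1 as a signed int
long unsigned = raw & 0xffffffffL;  // the unsigned interpretation
assert unsigned == 4294967295L;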
### Question:
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }### Answer:
@Test public void testWriteUnsignedIntLEToArray() { int value1 = 0x04030201; byte[] array1 = new byte[4]; ByteUtils.writeUnsignedIntLE(array1, 0, value1); assertArrayEquals(new byte[] {0x01, 0x02, 0x03, 0x04}, array1); array1 = new byte[8]; ByteUtils.writeUnsignedIntLE(array1, 2, value1); assertArrayEquals(new byte[] {0, 0, 0x01, 0x02, 0x03, 0x04, 0, 0}, array1); int value2 = 0xf4f3f2f1; byte[] array2 = new byte[4]; ByteUtils.writeUnsignedIntLE(array2, 0, value2); assertArrayEquals(new byte[] {(byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4}, array2); array2 = new byte[8]; ByteUtils.writeUnsignedIntLE(array2, 2, value2); assertArrayEquals(new byte[] {0, 0, (byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4, 0, 0}, array2); }
@Test public void testWriteUnsignedIntLEToOutputStream() throws IOException { int value1 = 0x04030201; ByteArrayOutputStream os1 = new ByteArrayOutputStream(); ByteUtils.writeUnsignedIntLE(os1, value1); ByteUtils.writeUnsignedIntLE(os1, value1); assertArrayEquals(new byte[] {0x01, 0x02, 0x03, 0x04, 0x01, 0x02, 0x03, 0x04}, os1.toByteArray()); int value2 = 0xf4f3f2f1; ByteArrayOutputStream os2 = new ByteArrayOutputStream(); ByteUtils.writeUnsignedIntLE(os2, value2); assertArrayEquals(new byte[] {(byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4}, os2.toByteArray()); } |
### Question:
ByteUtils { public static int readVarint(ByteBuffer buffer) { int value = 0; int i = 0; int b; while (((b = buffer.get()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 28) throw illegalVarintException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }### Answer:
@Test(expected = IllegalArgumentException.class) public void testInvalidVarint() { ByteBuffer buf = ByteBuffer.wrap(new byte[] {(byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0x01}); ByteUtils.readVarint(buf); } |
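readVarint decodes a zigzag varint: the value is stored seven bits per byte, low-order group first, with the high bit of each byte as a continuation flag, and at most five bytes are allowed for a 32-bit value, so five 0xff continuation bytes trigger the IllegalArgumentException above. A small self-contained sketch of the zigzag mapping that the final line (value >>> 1) ^ -(value & 1) undoes:

public class ZigZagSketch {
    // zigzag maps small-magnitude ints to small unsigned ints:
    // 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ...
    static int encode(int n) { return (n << 1) ^ (n >> 31); }
    static int decode(int n) { return (n >>> 1) ^ -(n & 1); }

    public static void main(String[] args) {
        System.out.println(encode(-1));  // 1: fits in a single varint byte 0x01
        System.out.println(decode(1));   // -1
    }
}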
### Question:
ByteUtils { public static long readVarlong(DataInput in) throws IOException { long value = 0L; int i = 0; long b; while (((b = in.readByte()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 63) throw illegalVarlongException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }### Answer:
@Test(expected = IllegalArgumentException.class) public void testInvalidVarlong() { ByteBuffer buf = ByteBuffer.wrap(new byte[] {(byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0x01}); ByteUtils.readVarlong(buf); } |
### Question:
Utils { public static String getHost(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? matcher.group(1) : null; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }### Answer:
@Test public void testGetHost() { assertEquals("127.0.0.1", getHost("127.0.0.1:8000")); assertEquals("mydomain.com", getHost("PLAINTEXT: assertEquals("MyDomain.com", getHost("PLAINTEXT: assertEquals("My_Domain.com", getHost("PLAINTEXT: assertEquals("::1", getHost("[::1]:1234")); assertEquals("2001:db8:85a3:8d3:1319:8a2e:370:7348", getHost("PLAINTEXT: assertEquals("2001:DB8:85A3:8D3:1319:8A2E:370:7348", getHost("PLAINTEXT: assertEquals("fe80::b1da:69ca:57f7:63d8%3", getHost("PLAINTEXT: } |
### Question:
Utils { public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }### Answer:
@Test public void testGetPort() { assertEquals(8000, getPort("127.0.0.1:8000").intValue()); assertEquals(8080, getPort("mydomain.com:8080").intValue()); assertEquals(8080, getPort("MyDomain.com:8080").intValue()); assertEquals(1234, getPort("[::1]:1234").intValue()); assertEquals(5678, getPort("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678").intValue()); assertEquals(5678, getPort("[2001:DB8:85A3:8D3:1319:8A2E:370:7348]:5678").intValue()); assertEquals(5678, getPort("[fe80::b1da:69ca:57f7:63d8%3]:5678").intValue()); } |
### Question:
Utils { public static String formatAddress(String host, Integer port) { return host.contains(":") ? "[" + host + "]:" + port : host + ":" + port; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }### Answer:
@Test public void testFormatAddress() { assertEquals("127.0.0.1:8000", formatAddress("127.0.0.1", 8000)); assertEquals("mydomain.com:8080", formatAddress("mydomain.com", 8080)); assertEquals("[::1]:1234", formatAddress("::1", 1234)); assertEquals("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678", formatAddress("2001:db8:85a3:8d3:1319:8a2e:370:7348", 5678)); } |
### Question:
Utils { public static <T> String join(T[] strs, String separator) { return join(Arrays.asList(strs), separator); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }### Answer:
@Test public void testJoin() { assertEquals("", Utils.join(Collections.emptyList(), ",")); assertEquals("1", Utils.join(Arrays.asList("1"), ",")); assertEquals("1,2,3", Utils.join(Arrays.asList(1, 2, 3), ",")); } |
### Question:
Utils { public static int abs(int n) { return (n == Integer.MIN_VALUE) ? 0 : Math.abs(n); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }### Answer:
@Test public void testAbs() { assertEquals(0, Utils.abs(Integer.MIN_VALUE)); assertEquals(10, Utils.abs(-10)); assertEquals(10, Utils.abs(10)); assertEquals(0, Utils.abs(0)); assertEquals(1, Utils.abs(-1)); } |
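The Integer.MIN_VALUE special case exists because two's-complement has no positive counterpart for MIN_VALUE, so Math.abs overflows back to a negative number. A one-line demonstration; Utils.toPositive from the same class, which masks with 0x7fffffff, is the related trick:

assert Math.abs(Integer.MIN_VALUE) == Integer.MIN_VALUE;  // still negative!
assert (Integer.MIN_VALUE & 0x7fffffff) == 0;             // the mask-based variant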
### Question:
Utils { public static byte[] readBytes(ByteBuffer buffer, int offset, int length) { byte[] dest = new byte[length]; if (buffer.hasArray()) { System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length); } else { buffer.mark(); buffer.position(offset); buffer.get(dest, 0, length); buffer.reset(); } return dest; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }### Answer:
@Test public void testReadBytes() { byte[] myvar = "Any String you want".getBytes(); ByteBuffer buffer = ByteBuffer.allocate(myvar.length); buffer.put(myvar); buffer.rewind(); this.subTest(buffer); buffer = ByteBuffer.wrap(myvar).asReadOnlyBuffer(); this.subTest(buffer); } |
### Question:
Utils { public static long min(long first, long ... rest) { long min = first; for (long r : rest) { if (r < min) min = r; } return min; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }### Answer:
@Test public void testMin() { assertEquals(1, Utils.min(1)); assertEquals(1, Utils.min(1, 2, 3)); assertEquals(1, Utils.min(2, 1, 3)); assertEquals(1, Utils.min(2, 3, 1)); } |
### Question:
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } Shell(long timeout); int exitCode(); Process process(); static String execCommand(String ... cmd); static String execCommand(String[] cmd, long timeout); }### Answer:
@Test public void testEchoHello() throws Exception { assumeTrue(!OperatingSystem.IS_WINDOWS); String output = Shell.execCommand("echo", "hello"); assertEquals("hello\n", output); }
@Test public void testHeadDevZero() throws Exception { assumeTrue(!OperatingSystem.IS_WINDOWS); final int length = 100000; String output = Shell.execCommand("head", "-c", Integer.toString(length), "/dev/zero"); assertEquals(length, output.length()); } |
### Question:
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public static void writeEmptyHeader(ByteBuffer buffer, byte magic, long producerId, short producerEpoch, int baseSequence, long baseOffset, long lastOffset, int partitionLeaderEpoch, TimestampType timestampType, long timestamp, boolean isTransactional, boolean isControlRecord) { int offsetDelta = (int) (lastOffset - baseOffset); writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic, CompressionType.NONE, timestampType, timestamp, timestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlRecord, partitionLeaderEpoch, 0); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }### Answer:
@Test public void testWriteEmptyHeader() { long producerId = 23423L; short producerEpoch = 145; int baseSequence = 983; long baseOffset = 15L; long lastOffset = 37; int partitionLeaderEpoch = 15; long timestamp = System.currentTimeMillis(); for (TimestampType timestampType : Arrays.asList(TimestampType.CREATE_TIME, TimestampType.LOG_APPEND_TIME)) { for (boolean isTransactional : Arrays.asList(true, false)) { for (boolean isControlBatch : Arrays.asList(true, false)) { ByteBuffer buffer = ByteBuffer.allocate(2048); DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.CURRENT_MAGIC_VALUE, producerId, producerEpoch, baseSequence, baseOffset, lastOffset, partitionLeaderEpoch, timestampType, timestamp, isTransactional, isControlBatch); buffer.flip(); DefaultRecordBatch batch = new DefaultRecordBatch(buffer); assertEquals(producerId, batch.producerId()); assertEquals(producerEpoch, batch.producerEpoch()); assertEquals(baseSequence, batch.baseSequence()); assertEquals(baseSequence + ((int) (lastOffset - baseOffset)), batch.lastSequence()); assertEquals(baseOffset, batch.baseOffset()); assertEquals(lastOffset, batch.lastOffset()); assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch()); assertEquals(isTransactional, batch.isTransactional()); assertEquals(timestampType, batch.timestampType()); assertEquals(timestamp, batch.maxTimestamp()); assertEquals(isControlBatch, batch.isControlBatch()); } } } } |
### Question:
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public int sizeInBytes() { return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }### Answer:
@Test public void testSizeInBytes() { Header[] headers = new Header[] { new RecordHeader("foo", "value".getBytes()), new RecordHeader("bar", (byte[]) null) }; long timestamp = System.currentTimeMillis(); SimpleRecord[] records = new SimpleRecord[] { new SimpleRecord(timestamp, "key".getBytes(), "value".getBytes()), new SimpleRecord(timestamp + 30000, null, "value".getBytes()), new SimpleRecord(timestamp + 60000, "key".getBytes(), null), new SimpleRecord(timestamp + 60000, "key".getBytes(), "value".getBytes(), headers) }; int actualSize = MemoryRecords.withRecords(CompressionType.NONE, records).sizeInBytes(); assertEquals(actualSize, DefaultRecordBatch.sizeInBytes(Arrays.asList(records))); } |
### Question:
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }### Answer:
@Test(expected = InvalidRecordException.class) public void testInvalidRecordCountTooManyNonCompressedV2() { long now = System.currentTimeMillis(); DefaultRecordBatch batch = recordsWithInvalidRecordCount(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.NONE, 5); for (Record record: batch) { record.isValid(); } }
@Test(expected = InvalidRecordException.class) public void testInvalidRecordCountTooLittleNonCompressedV2() { long now = System.currentTimeMillis(); DefaultRecordBatch batch = recordsWithInvalidRecordCount(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.NONE, 2); for (Record record: batch) { record.isValid(); } }
@Test(expected = InvalidRecordException.class) public void testInvalidRecordCountTooManyCompressedV2() { long now = System.currentTimeMillis(); DefaultRecordBatch batch = recordsWithInvalidRecordCount(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.GZIP, 5); for (Record record: batch) { record.isValid(); } }
@Test(expected = InvalidRecordException.class) public void testInvalidRecordCountTooLittleCompressedV2() { long now = System.currentTimeMillis(); DefaultRecordBatch batch = recordsWithInvalidRecordCount(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.GZIP, 2); for (Record record: batch) { record.isValid(); } } |
### Question:
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setLastOffset(long offset) { buffer.putLong(BASE_OFFSET_OFFSET, offset - lastOffsetDelta()); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }### Answer:
@Test public void testSetLastOffset() { SimpleRecord[] simpleRecords = new SimpleRecord[] { new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), new SimpleRecord(3L, "c".getBytes(), "3".getBytes()) }; MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, CompressionType.NONE, TimestampType.CREATE_TIME, simpleRecords); long lastOffset = 500L; long firstOffset = lastOffset - simpleRecords.length + 1; DefaultRecordBatch batch = new DefaultRecordBatch(records.buffer()); batch.setLastOffset(lastOffset); assertEquals(lastOffset, batch.lastOffset()); assertEquals(firstOffset, batch.baseOffset()); assertTrue(batch.isValid()); List<MutableRecordBatch> recordBatches = Utils.toList(records.batches().iterator()); assertEquals(1, recordBatches.size()); assertEquals(lastOffset, recordBatches.get(0).lastOffset()); long offset = firstOffset; for (Record record : records.records()) assertEquals(offset++, record.offset()); } |
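Only the base offset and the delta to the last record are stored in the batch header, so setLastOffset works by rewriting the base offset. The arithmetic from the test above, as a sketch:

long lastOffset = 500L;
int lastOffsetDelta = 2;                        // three records: deltas 0, 1, 2
long baseOffset = lastOffset - lastOffsetDelta;
assert baseOffset == 498L;                      // firstOffset in the test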
### Question:
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setPartitionLeaderEpoch(int epoch) { buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }### Answer:
@Test public void testSetPartitionLeaderEpoch() { MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, CompressionType.NONE, TimestampType.CREATE_TIME, new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), new SimpleRecord(3L, "c".getBytes(), "3".getBytes())); int leaderEpoch = 500; DefaultRecordBatch batch = new DefaultRecordBatch(records.buffer()); batch.setPartitionLeaderEpoch(leaderEpoch); assertEquals(leaderEpoch, batch.partitionLeaderEpoch()); assertTrue(batch.isValid()); List<MutableRecordBatch> recordBatches = Utils.toList(records.batches().iterator()); assertEquals(1, recordBatches.size()); assertEquals(leaderEpoch, recordBatches.get(0).partitionLeaderEpoch()); } |
### Question:
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) { long currentMaxTimestamp = maxTimestamp(); if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp) return; byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch()); buffer.putShort(ATTRIBUTES_OFFSET, attributes); buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp); long crc = computeChecksum(); ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }### Answer:
@Test(expected = IllegalArgumentException.class) public void testSetNoTimestampTypeNotAllowed() { MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, CompressionType.NONE, TimestampType.CREATE_TIME, new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), new SimpleRecord(3L, "c".getBytes(), "3".getBytes())); DefaultRecordBatch batch = new DefaultRecordBatch(records.buffer()); batch.setMaxTimestamp(TimestampType.NO_TIMESTAMP_TYPE, RecordBatch.NO_TIMESTAMP); } |
### Question:
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public boolean isControlBatch() { return (attributes() & CONTROL_FLAG_MASK) > 0; } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }### Answer:
@Test public void testReadAndWriteControlBatch() { long producerId = 1L; short producerEpoch = 0; int coordinatorEpoch = 15; ByteBuffer buffer = ByteBuffer.allocate(128); MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, producerId, producerEpoch, RecordBatch.NO_SEQUENCE, true, true, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.remaining()); EndTransactionMarker marker = new EndTransactionMarker(ControlRecordType.COMMIT, coordinatorEpoch); builder.appendEndTxnMarker(System.currentTimeMillis(), marker); MemoryRecords records = builder.build(); List<MutableRecordBatch> batches = TestUtils.toList(records.batches()); assertEquals(1, batches.size()); MutableRecordBatch batch = batches.get(0); assertTrue(batch.isControlBatch()); List<Record> logRecords = TestUtils.toList(records.records()); assertEquals(1, logRecords.size()); Record commitRecord = logRecords.get(0); assertEquals(marker, EndTransactionMarker.deserialize(commitRecord)); } |
### Question:
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { static int incrementSequence(int baseSequence, int increment) { if (baseSequence > Integer.MAX_VALUE - increment) return increment - (Integer.MAX_VALUE - baseSequence) - 1; return baseSequence + increment; } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }### Answer:
@Test public void testIncrementSequence() { assertEquals(10, DefaultRecordBatch.incrementSequence(5, 5)); assertEquals(0, DefaultRecordBatch.incrementSequence(Integer.MAX_VALUE, 1)); assertEquals(4, DefaultRecordBatch.incrementSequence(Integer.MAX_VALUE - 5, 10)); } |
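Producer sequence numbers occupy the range 0..Integer.MAX_VALUE and wrap back to 0, so when the increment would pass MAX_VALUE the overshoot becomes the new sequence. Working through the test's third case:

// Worked example of the wraparound in incrementSequence:
int base = Integer.MAX_VALUE - 5;
int increment = 10;
// headroom to MAX_VALUE is 5; one more step wraps to 0; 4 steps remain
int wrapped = increment - (Integer.MAX_VALUE - base) - 1;
assert wrapped == 4;  // matches the assertion in the test above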
### Question:
FileRecords extends AbstractRecords implements Closeable { public FileChannel channel() { return channel; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }### Answer:
@Test public void testIterationDoesntChangePosition() throws IOException { long position = fileRecords.channel().position(); Iterator<Record> records = fileRecords.records().iterator(); for (byte[] value : values) { assertTrue(records.hasNext()); assertEquals(records.next().value(), ByteBuffer.wrap(value)); } assertEquals(position, fileRecords.channel().position()); } |
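The test above passes because `FileRecords` iterates using absolute-position channel reads, which never move the channel's cursor. A self-contained, JDK-only sketch of that distinction (file name and contents are arbitrary):

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class AbsoluteReadDemo {
    public static void main(String[] args) throws IOException {
        Path tmp = Files.createTempFile("records", ".log");
        Files.write(tmp, "hello".getBytes());
        try (FileChannel channel = FileChannel.open(tmp, StandardOpenOption.READ)) {
            long before = channel.position();
            channel.read(ByteBuffer.allocate(5), 0L); // absolute read: the cursor is untouched
            System.out.println(channel.position() == before); // true
        }
        Files.delete(tmp);
    }
}
```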
### Question:
FileRecords extends AbstractRecords implements Closeable { public FileRecords read(int position, int size) throws IOException { if (position < 0) throw new IllegalArgumentException("Invalid position: " + position); if (size < 0) throw new IllegalArgumentException("Invalid size: " + size); final int end; if (this.start + position + size < 0) end = sizeInBytes(); else end = Math.min(this.start + position + size, sizeInBytes()); return new FileRecords(file, channel, this.start + position, end, true); } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }### Answer:
@Test public void testRead() throws IOException { FileRecords read = fileRecords.read(0, fileRecords.sizeInBytes()); TestUtils.checkEquals(fileRecords.batches(), read.batches()); List<RecordBatch> items = batches(read); RecordBatch second = items.get(1); read = fileRecords.read(second.sizeInBytes(), fileRecords.sizeInBytes()); assertEquals("Try a read starting from the second message", items.subList(1, 3), batches(read)); read = fileRecords.read(second.sizeInBytes(), second.sizeInBytes()); assertEquals("Try a read of a single message starting from the second message", Collections.singletonList(second), batches(read)); } |
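Note the overflow guard in `read`: the check `this.start + position + size < 0` catches int overflow and clamps the slice to the full file. A tiny sketch of just that end-position calculation (class and method names are illustrative):

```java
public class SliceEndDemo {
    // Mirrors the end calculation in FileRecords.read: an overflowing start + position + size
    // goes negative, and the slice is then clamped to sizeInBytes.
    static int sliceEnd(int start, int position, int size, int sizeInBytes) {
        if (start + position + size < 0)
            return sizeInBytes;
        return Math.min(start + position + size, sizeInBytes);
    }

    public static void main(String[] args) {
        System.out.println(sliceEnd(0, 10, 20, 100));                // 30: ordinary slice
        System.out.println(sliceEnd(0, 50, Integer.MAX_VALUE, 100)); // 100: overflow clamps to file size
    }
}
```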
### Question:
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }### Answer:
@Test public void testTruncateNotCalledIfSizeIsSameAsTargetSize() throws IOException { FileChannel channelMock = EasyMock.createMock(FileChannel.class); EasyMock.expect(channelMock.size()).andReturn(42L).atLeastOnce(); EasyMock.expect(channelMock.position(42L)).andReturn(null); EasyMock.replay(channelMock); FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); fileRecords.truncateTo(42); EasyMock.verify(channelMock); }
@Test public void testTruncateNotCalledIfTargetSizeIsBiggerThanSize() throws IOException { FileChannel channelMock = EasyMock.createMock(FileChannel.class); EasyMock.expect(channelMock.size()).andReturn(42L).atLeastOnce(); EasyMock.expect(channelMock.position(42L)).andReturn(null); EasyMock.replay(channelMock); FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); try { fileRecords.truncateTo(43); fail("Should throw KafkaException"); } catch (KafkaException e) { } EasyMock.verify(channelMock); }
@Test public void testTruncateIfSizeIsDifferentToTargetSize() throws IOException { FileChannel channelMock = EasyMock.createMock(FileChannel.class); EasyMock.expect(channelMock.size()).andReturn(42L).atLeastOnce(); EasyMock.expect(channelMock.position(42L)).andReturn(null).once(); EasyMock.expect(channelMock.truncate(23L)).andReturn(null).once(); EasyMock.replay(channelMock); FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); fileRecords.truncateTo(23); EasyMock.verify(channelMock); } |
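The three mock-based tests above pin down the contract of `truncateTo`: it validates the target size first, only touches the channel when the target is strictly smaller than the current size, and returns the number of bytes removed. A channel-free sketch of that contract (the exception type is simplified to keep the snippet self-contained; the real method throws `KafkaException` and also truncates the channel):

```java
public class TruncateContractDemo {
    // Mirrors the validation and return value of FileRecords.truncateTo.
    static int bytesRemovedByTruncate(int originalSize, int targetSize) {
        if (targetSize > originalSize || targetSize < 0)
            throw new IllegalArgumentException("Attempt to truncate log segment to " + targetSize
                    + " bytes failed, size of this log segment is " + originalSize + " bytes.");
        return originalSize - targetSize; // channel.truncate only runs when targetSize < channel.size()
    }

    public static void main(String[] args) {
        System.out.println(bytesRemovedByTruncate(42, 42)); // 0: no-op, channel untouched
        System.out.println(bytesRemovedByTruncate(42, 23)); // 19 bytes removed
    }
}
```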
### Question:
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }### Answer:
@Test public void shouldBeFlaggedAsTransactionalWhenTransactionalRecords() throws Exception { final MemoryRecords memoryRecords = MemoryRecords.withTransactionalRecords(0, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord); final ProduceRequest request = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) -1, 10, Collections.singletonMap( new TopicPartition("topic", 1), memoryRecords)).build(); assertTrue(request.isTransactional()); }
@Test public void shouldNotBeFlaggedAsTransactionalWhenNoRecords() throws Exception { final ProduceRequest request = createNonIdempotentNonTransactionalRecords(); assertFalse(request.isTransactional()); }
@Test public void shouldNotBeFlaggedAsIdempotentWhenRecordsNotIdempotent() throws Exception { final ProduceRequest request = createNonIdempotentNonTransactionalRecords(); assertFalse(request.isIdempotent()); } |
### Question:
ProduceRequest extends AbstractRequest { public boolean isIdempotent() { return idempotent; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }### Answer:
@Test public void shouldBeFlaggedAsIdempotentWhenIdempotentRecords() throws Exception { final MemoryRecords memoryRecords = MemoryRecords.withIdempotentRecords(1, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord); final ProduceRequest request = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) -1, 10, Collections.singletonMap( new TopicPartition("topic", 1), memoryRecords)).build(); assertTrue(request.isIdempotent()); } |
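Taken together, these tests show how the two flags relate: transactional batches carry producer id, epoch, and sequence plus a transaction, while idempotent batches carry the producer state alone. A fragment mirroring the fixtures used in the tests (the `SimpleRecord` value stands in for the tests' unshown `simpleRecord` field, and the argument values are copied from the tests above):

```java
SimpleRecord simpleRecord = new SimpleRecord(System.currentTimeMillis(), "key".getBytes(), "value".getBytes());

// A request built over these records reports isTransactional() == true (first test above).
MemoryRecords transactional = MemoryRecords.withTransactionalRecords(
        0, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord);

// A request built over these records reports isIdempotent() == true (test above)
// without being transactional: producer state is present, but no transactionalId.
MemoryRecords idempotent = MemoryRecords.withIdempotentRecords(
        1, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord);
```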
### Question:
StreamsConfig extends AbstractConfig { public Serde defaultKeySerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_KEY_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), true); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure key serde %s", get(DEFAULT_KEY_SERDE_CLASS_CONFIG)), e); } } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }### Answer:
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfKeySerdeConfigFails() throws Exception { props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, MisconfiguredSerde.class); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.defaultKeySerde(); } |
### Question:
StreamsConfig extends AbstractConfig { public Serde defaultValueSerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), false); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure value serde %s", get(DEFAULT_VALUE_SERDE_CLASS_CONFIG)), e); } } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }### Answer:
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfValueSerdeConfigFails() throws Exception { props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, MisconfiguredSerde.class); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.defaultValueSerde(); } |
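Both serde tests rely on a `MisconfiguredSerde` fixture that is not shown in this section. A hypothetical stand-in is easy to reconstruct: any `Serde` whose `configure` throws will be wrapped in the `StreamsException` raised by `defaultKeySerde()` / `defaultValueSerde()`:

```java
import java.util.Map;
import org.apache.kafka.common.serialization.Deserializer;
import org.apache.kafka.common.serialization.Serde;
import org.apache.kafka.common.serialization.Serializer;

// Hypothetical reconstruction of the MisconfiguredSerde test fixture.
public class MisconfiguredSerde implements Serde<Object> {
    @Override
    public void configure(final Map<String, ?> configs, final boolean isKey) {
        throw new RuntimeException("boom"); // any failure here surfaces as a StreamsException
    }

    @Override
    public void close() { }

    @Override
    public Serializer<Object> serializer() { return null; }

    @Override
    public Deserializer<Object> deserializer() { return null; }
}
```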
### Question:
JsonConverter implements Converter { @Override public void configure(Map<String, ?> configs, boolean isKey) { Object enableConfigsVal = configs.get(SCHEMAS_ENABLE_CONFIG); if (enableConfigsVal != null) enableSchemas = enableConfigsVal.toString().equals("true"); serializer.configure(configs, isKey); deserializer.configure(configs, isKey); Object cacheSizeVal = configs.get(SCHEMAS_CACHE_SIZE_CONFIG); if (cacheSizeVal != null) cacheSize = Integer.parseInt((String) cacheSizeVal); fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<Schema, ObjectNode>(cacheSize)); toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<JsonNode, Schema>(cacheSize)); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }### Answer:
@Test public void testJsonSchemaCacheSizeFromConfigFile() throws URISyntaxException, IOException { URL url = getClass().getResource("/connect-test.properties"); File propFile = new File(url.toURI()); String workerPropsFile = propFile.getAbsolutePath(); Map<String, String> workerProps = !workerPropsFile.isEmpty() ? Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.<String, String>emptyMap(); JsonConverter rc = new JsonConverter(); rc.configure(workerProps, false); } |
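The test above loads the configuration from a properties file; the same settings can be passed programmatically. A minimal sketch, assuming the standard `schemas.enable` and `schemas.cache.size` keys; note the cache size must be a `String`, matching the `Integer.parseInt((String) cacheSizeVal)` cast in `configure`:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.json.JsonConverter;

public class JsonConverterConfigDemo {
    public static void main(String[] args) {
        Map<String, String> config = new HashMap<>();
        config.put("schemas.enable", "true");
        config.put("schemas.cache.size", "16"); // parsed via Integer.parseInt, so it must be a String

        JsonConverter converter = new JsonConverter();
        converter.configure(config, false /* isKey */);
    }
}
```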
### Question:
SerializedKeyValueIterator implements KeyValueIterator<K, V> { @Override public KeyValue<K, V> next() { if (!hasNext()) { throw new NoSuchElementException(); } final KeyValue<Bytes, byte[]> next = bytesIterator.next(); return KeyValue.pair(serdes.keyFrom(next.key.get()), serdes.valueFrom(next.value)); } SerializedKeyValueIterator(final KeyValueIterator<Bytes, byte[]> bytesIterator,
final StateSerdes<K, V> serdes); @Override void close(); @Override K peekNextKey(); @Override boolean hasNext(); @Override KeyValue<K, V> next(); @Override void remove(); }### Answer:
@Test public void shouldReturnNextValueWhenItExists() throws Exception { assertThat(serializedKeyValueIterator.next(), equalTo(KeyValue.pair("hi", "there"))); assertThat(serializedKeyValueIterator.next(), equalTo(KeyValue.pair("hello", "world"))); }
@Test public void shouldThrowNoSuchElementOnNextWhenIteratorExhausted() throws Exception { advanceIteratorToEnd(); try { serializedKeyValueIterator.next(); fail("Expected NoSuchElementException on exhausted iterator"); } catch (final NoSuchElementException nse) { } } |
### Question:
SerializedKeyValueIterator implements KeyValueIterator<K, V> { @Override public boolean hasNext() { return bytesIterator.hasNext(); } SerializedKeyValueIterator(final KeyValueIterator<Bytes, byte[]> bytesIterator,
final StateSerdes<K, V> serdes); @Override void close(); @Override K peekNextKey(); @Override boolean hasNext(); @Override KeyValue<K, V> next(); @Override void remove(); }### Answer:
@Test public void shouldReturnFalseOnHasNextWhenNoMoreResults() throws Exception { advanceIteratorToEnd(); assertFalse(serializedKeyValueIterator.hasNext()); }
@Test public void shouldReturnTrueOnHasNextWhenMoreResults() { assertTrue(serializedKeyValueIterator.hasNext()); } |
### Question:
SerializedKeyValueIterator implements KeyValueIterator<K, V> { @Override public void remove() { throw new UnsupportedOperationException("remove() is not supported in " + getClass().getName()); } SerializedKeyValueIterator(final KeyValueIterator<Bytes, byte[]> bytesIterator,
final StateSerdes<K, V> serdes); @Override void close(); @Override K peekNextKey(); @Override boolean hasNext(); @Override KeyValue<K, V> next(); @Override void remove(); }### Answer:
@Test(expected = UnsupportedOperationException.class) public void shouldThrowUnsupportedOperationOnRemove() throws Exception { serializedKeyValueIterator.remove(); } |
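The three `SerializedKeyValueIterator` pairs above all follow one pattern: delegate `hasNext()` to the raw iterator, deserialize lazily in `next()`, and reject `remove()`. A generic, dependency-free sketch of that pattern (names are illustrative):

```java
import java.util.Iterator;
import java.util.function.Function;

public class MappingIterator<A, B> implements Iterator<B> {
    private final Iterator<A> inner;
    private final Function<A, B> mapper;

    public MappingIterator(final Iterator<A> inner, final Function<A, B> mapper) {
        this.inner = inner;
        this.mapper = mapper;
    }

    @Override
    public boolean hasNext() { return inner.hasNext(); } // pure delegation

    @Override
    public B next() { return mapper.apply(inner.next()); } // inner.next() raises NoSuchElementException when exhausted

    @Override
    public void remove() {
        throw new UnsupportedOperationException("remove() is not supported in " + getClass().getName());
    }
}
```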
### Question:
SessionKeySchema implements SegmentedBytesStore.KeySchema { @Override public HasNextCondition hasNextCondition(final Bytes binaryKeyFrom, final Bytes binaryKeyTo, final long from, final long to) { return new HasNextCondition() { @Override public boolean hasNext(final KeyValueIterator<Bytes, ?> iterator) { while (iterator.hasNext()) { final Bytes bytes = iterator.peekNextKey(); final Windowed<Bytes> windowedKey = SessionKeySerde.fromBytes(bytes); if (windowedKey.key().compareTo(binaryKeyFrom) >= 0 && windowedKey.key().compareTo(binaryKeyTo) <= 0 && windowedKey.window().end() >= from && windowedKey.window().start() <= to) { return true; } iterator.next(); } return false; } }; } @Override void init(final String topic); @Override Bytes upperRangeFixedSize(final Bytes key, final long to); @Override Bytes lowerRangeFixedSize(final Bytes key, final long from); @Override Bytes upperRange(Bytes key, long to); @Override Bytes lowerRange(Bytes key, long from); @Override long segmentTimestamp(final Bytes key); @Override HasNextCondition hasNextCondition(final Bytes binaryKeyFrom, final Bytes binaryKeyTo, final long from, final long to); @Override List<Segment> segmentsToSearch(final Segments segments, final long from, final long to); }### Answer:
@Test public void shouldFetchExactKeysSkippingLongerKeys() throws Exception { final Bytes key = Bytes.wrap(new byte[]{0}); final List<Integer> result = getValues(sessionKeySchema.hasNextCondition(key, key, 0, Long.MAX_VALUE)); assertThat(result, equalTo(Arrays.asList(2, 4))); }
@Test public void shouldFetchExactKeySkippingShorterKeys() throws Exception { final Bytes key = Bytes.wrap(new byte[]{0, 0}); final HasNextCondition hasNextCondition = sessionKeySchema.hasNextCondition(key, key, 0, Long.MAX_VALUE); final List<Integer> results = getValues(hasNextCondition); assertThat(results, equalTo(Arrays.asList(1, 5))); } |
### Question:
SessionKeySchema implements SegmentedBytesStore.KeySchema { @Override public Bytes upperRange(Bytes key, long to) { final byte[] maxSuffix = ByteBuffer.allocate(SUFFIX_SIZE) .putLong(to) .putLong(to) .array(); return OrderedBytes.upperRange(key, maxSuffix); } @Override void init(final String topic); @Override Bytes upperRangeFixedSize(final Bytes key, final long to); @Override Bytes lowerRangeFixedSize(final Bytes key, final long from); @Override Bytes upperRange(Bytes key, long to); @Override Bytes lowerRange(Bytes key, long from); @Override long segmentTimestamp(final Bytes key); @Override HasNextCondition hasNextCondition(final Bytes binaryKeyFrom, final Bytes binaryKeyTo, final long from, final long to); @Override List<Segment> segmentsToSearch(final Segments segments, final long from, final long to); }### Answer:
@Test public void testUpperBoundWithLargeTimestamps() throws Exception { Bytes upper = sessionKeySchema.upperRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), Long.MAX_VALUE); assertThat( "shorter key with max timestamp should be in range", upper.compareTo( SessionKeySerde.bytesToBinary( new Windowed<>( Bytes.wrap(new byte[]{0xA}), new SessionWindow(Long.MAX_VALUE, Long.MAX_VALUE)) ) ) >= 0 ); assertThat( "shorter key with max timestamp should be in range", upper.compareTo( SessionKeySerde.bytesToBinary( new Windowed<>( Bytes.wrap(new byte[]{0xA, 0xB}), new SessionWindow(Long.MAX_VALUE, Long.MAX_VALUE)) ) ) >= 0 ); assertThat(upper, equalTo(SessionKeySerde.bytesToBinary( new Windowed<>(Bytes.wrap(new byte[]{0xA}), new SessionWindow(Long.MAX_VALUE, Long.MAX_VALUE)))) ); }
@Test public void testUpperBoundWithKeyBytesLargerThanFirstTimestampByte() throws Exception { Bytes upper = sessionKeySchema.upperRange(Bytes.wrap(new byte[]{0xA, (byte) 0x8F, (byte) 0x9F}), Long.MAX_VALUE); assertThat( "shorter key with max timestamp should be in range", upper.compareTo( SessionKeySerde.bytesToBinary( new Windowed<>( Bytes.wrap(new byte[]{0xA, (byte) 0x8F}), new SessionWindow(Long.MAX_VALUE, Long.MAX_VALUE)) ) ) >= 0 ); assertThat(upper, equalTo(SessionKeySerde.bytesToBinary( new Windowed<>(Bytes.wrap(new byte[]{0xA, (byte) 0x8F, (byte) 0x9F}), new SessionWindow(Long.MAX_VALUE, Long.MAX_VALUE)))) ); }
@Test public void testUpperBoundWithZeroTimestamp() throws Exception { Bytes upper = sessionKeySchema.upperRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), 0); assertThat(upper, equalTo(SessionKeySerde.bytesToBinary( new Windowed<>(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), new SessionWindow(0, 0)))) ); } |
### Question:
SessionKeySchema implements SegmentedBytesStore.KeySchema { @Override public Bytes lowerRange(Bytes key, long from) { return OrderedBytes.lowerRange(key, MIN_SUFFIX); } @Override void init(final String topic); @Override Bytes upperRangeFixedSize(final Bytes key, final long to); @Override Bytes lowerRangeFixedSize(final Bytes key, final long from); @Override Bytes upperRange(Bytes key, long to); @Override Bytes lowerRange(Bytes key, long from); @Override long segmentTimestamp(final Bytes key); @Override HasNextCondition hasNextCondition(final Bytes binaryKeyFrom, final Bytes binaryKeyTo, final long from, final long to); @Override List<Segment> segmentsToSearch(final Segments segments, final long from, final long to); }### Answer:
@Test public void testLowerBoundWithZeroTimestamp() throws Exception { Bytes lower = sessionKeySchema.lowerRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), 0); assertThat(lower, equalTo(SessionKeySerde.bytesToBinary(new Windowed<>(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), new SessionWindow(0, 0))))); }
@Test public void testLowerBoundMatchesTrailingZeros() throws Exception { Bytes lower = sessionKeySchema.lowerRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), Long.MAX_VALUE); assertThat( "appending zeros to key should still be in range", lower.compareTo( SessionKeySerde.bytesToBinary( new Windowed<>( Bytes.wrap(new byte[]{0xA, 0xB, 0xC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}), new SessionWindow(Long.MAX_VALUE, Long.MAX_VALUE)) ) ) < 0 ); assertThat(lower, equalTo(SessionKeySerde.bytesToBinary(new Windowed<>(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), new SessionWindow(0, 0))))); } |
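The lower-range tests hinge on unsigned lexicographic byte ordering: a key sorts strictly before any longer key that extends it, even with all-zero suffix bytes, so `lowerRange(key, ...)` still covers such extensions. A small demo using a manual unsigned compare (the comparator mirrors the ordering `Bytes.compareTo` is understood to use; names are illustrative):

```java
public class UnsignedLexDemo {
    // Unsigned lexicographic compare over raw byte arrays.
    static int compare(byte[] a, byte[] b) {
        int n = Math.min(a.length, b.length);
        for (int i = 0; i < n; i++) {
            int cmp = Integer.compare(a[i] & 0xFF, b[i] & 0xFF);
            if (cmp != 0) return cmp;
        }
        return Integer.compare(a.length, b.length);
    }

    public static void main(String[] args) {
        byte[] key = {0xA, 0xB, 0xC};
        byte[] keyWithZeros = {0xA, 0xB, 0xC, 0, 0, 0};
        // The shorter key sorts strictly before its zero-padded extension,
        // matching the "appending zeros to key should still be in range" assertion above.
        System.out.println(compare(key, keyWithZeros) < 0); // true
    }
}
```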
### Question:
CompositeReadOnlySessionStore implements ReadOnlySessionStore<K, V> { private KeyValueIterator<Windowed<K>, V> fetch(Fetcher<K, V> fetcher) { final List<ReadOnlySessionStore<K, V>> stores = storeProvider.stores(storeName, queryableStoreType); for (final ReadOnlySessionStore<K, V> store : stores) { try { final KeyValueIterator<Windowed<K>, V> result = fetcher.fetch(store); if (!result.hasNext()) { result.close(); } else { return result; } } catch (final InvalidStateStoreException ise) { throw new InvalidStateStoreException("State store [" + storeName + "] is not available anymore" + " and may have been migrated to another instance; " + "please re-discover its location from the state metadata."); } } return KeyValueIterators.emptyIterator(); } CompositeReadOnlySessionStore(final StateStoreProvider storeProvider,
final QueryableStoreType<ReadOnlySessionStore<K, V>> queryableStoreType,
final String storeName); @Override KeyValueIterator<Windowed<K>, V> fetch(final K key); @Override KeyValueIterator<Windowed<K>, V> fetch(final K from, final K to); }### Answer:
@Test public void shouldFetchResultsFromUnderlyingSessionStore() throws Exception { underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(0, 0)), 1L); underlyingSessionStore.put(new Windowed<>("a", new SessionWindow(10, 10)), 2L); final List<KeyValue<Windowed<String>, Long>> results = toList(sessionStore.fetch("a")); assertEquals(Arrays.asList(KeyValue.pair(new Windowed<>("a", new SessionWindow(0, 0)), 1L), KeyValue.pair(new Windowed<>("a", new SessionWindow(10, 10)), 2L)), results); }
@Test public void shouldReturnEmptyIteratorIfNoData() throws Exception { final KeyValueIterator<Windowed<String>, Long> result = sessionStore.fetch("b"); assertFalse(result.hasNext()); }
@Test public void shouldFindValueForKeyWhenMultiStores() throws Exception { final ReadOnlySessionStoreStub<String, Long> secondUnderlying = new ReadOnlySessionStoreStub<>(); stubProviderTwo.addStore(storeName, secondUnderlying); final Windowed<String> keyOne = new Windowed<>("key-one", new SessionWindow(0, 0)); final Windowed<String> keyTwo = new Windowed<>("key-two", new SessionWindow(0, 0)); underlyingSessionStore.put(keyOne, 0L); secondUnderlying.put(keyTwo, 10L); final List<KeyValue<Windowed<String>, Long>> keyOneResults = toList(sessionStore.fetch("key-one")); final List<KeyValue<Windowed<String>, Long>> keyTwoResults = toList(sessionStore.fetch("key-two")); assertEquals(Collections.singletonList(KeyValue.pair(keyOne, 0L)), keyOneResults); assertEquals(Collections.singletonList(KeyValue.pair(keyTwo, 10L)), keyTwoResults); }
@Test public void shouldNotGetValueFromOtherStores() throws Exception { final Windowed<String> expectedKey = new Windowed<>("foo", new SessionWindow(0, 0)); otherUnderlyingStore.put(new Windowed<>("foo", new SessionWindow(10, 10)), 10L); underlyingSessionStore.put(expectedKey, 1L); final KeyValueIterator<Windowed<String>, Long> result = sessionStore.fetch("foo"); assertEquals(KeyValue.pair(expectedKey, 1L), result.next()); assertFalse(result.hasNext()); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowInvalidStateStoreExceptionOnRebalance() throws Exception { final CompositeReadOnlySessionStore<String, String> store = new CompositeReadOnlySessionStore<>(new StateStoreProviderStub(true), QueryableStoreTypes.<String, String>sessionStore(), "whateva"); store.fetch("a"); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowInvalidStateStoreExceptionIfFetchThrows() throws Exception { underlyingSessionStore.setOpen(false); underlyingSessionStore.fetch("key"); } |
### Question:
NamedCache { synchronized void evict() { if (tail == null) { return; } final LRUNode eldest = tail; currentSizeBytes -= eldest.size(); remove(eldest); cache.remove(eldest.key); if (eldest.entry.isDirty()) { flush(eldest); } } NamedCache(final String name, final StreamsMetrics metrics); long size(); }### Answer:
@Test public void shouldNotThrowNullPointerWhenCacheIsEmptyAndEvictionCalled() throws Exception { cache.evict(); } |
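`NamedCache` implements LRU eviction by hand with a doubly linked list (`putHead`, `updateLRU`, eviction from `tail`), flushing dirty entries as they fall off. For intuition, the JDK's `LinkedHashMap` in access-order mode gives the same eldest-first policy in a few lines (capacity and values are illustrative; the dirty-entry flush has no analogue here):

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class LruSketch {
    public static void main(String[] args) {
        final int maxEntries = 2;
        // accessOrder = true: iteration runs from least to most recently used.
        Map<String, String> lru = new LinkedHashMap<String, String>(16, 0.75f, true) {
            @Override
            protected boolean removeEldestEntry(final Map.Entry<String, String> eldest) {
                return size() > maxEntries; // evict the LRU entry once over capacity
            }
        };
        lru.put("a", "1");
        lru.put("b", "2");
        lru.get("a");      // touching "a" makes "b" the eldest
        lru.put("c", "3"); // evicts "b"
        System.out.println(lru.keySet()); // [a, c]
    }
}
```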
### Question:
NamedCache { synchronized void put(final Bytes key, final LRUCacheEntry value) { if (!value.isDirty() && dirtyKeys.contains(key)) { throw new IllegalStateException(String.format("Attempting to put a clean entry for key [%s] " + "into NamedCache [%s] when it already contains " + "a dirty entry for the same key", key, name)); } LRUNode node = cache.get(key); if (node != null) { numOverwrites++; currentSizeBytes -= node.size(); node.update(value); updateLRU(node); } else { node = new LRUNode(key, value); putHead(node); cache.put(key, node); } if (value.isDirty()) { dirtyKeys.remove(key); dirtyKeys.add(key); } currentSizeBytes += node.size(); } NamedCache(final String name, final StreamsMetrics metrics); long size(); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldThrowIllegalStateExceptionWhenTryingToOverwriteDirtyEntryWithCleanEntry() throws Exception { cache.put(Bytes.wrap(new byte[]{0}), new LRUCacheEntry(new byte[]{10}, true, 0, 0, 0, "")); cache.put(Bytes.wrap(new byte[]{0}), new LRUCacheEntry(new byte[]{10}, false, 0, 0, 0, "")); } |
### Question:
NamedCache { synchronized LRUCacheEntry get(final Bytes key) { if (key == null) { return null; } final LRUNode node = getInternal(key); if (node == null) { return null; } updateLRU(node); return node.entry; } NamedCache(final String name, final StreamsMetrics metrics); long size(); }### Answer:
@Test public void shouldReturnNullIfKeyIsNull() throws Exception { assertNull(cache.get(null)); } |
### Question:
CachingWindowStore extends WrappedStateStore.AbstractStateStore implements WindowStore<K, V>, CachedStateStore<Windowed<K>, V> { @Override public synchronized WindowStoreIterator<V> fetch(final K key, final long timeFrom, final long timeTo) { validateStoreOpen(); final Bytes keyBytes = Bytes.wrap(serdes.rawKey(key)); final WindowStoreIterator<byte[]> underlyingIterator = underlying.fetch(keyBytes, timeFrom, timeTo); final Bytes cacheKeyFrom = cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(keyBytes, timeFrom)); final Bytes cacheKeyTo = cacheFunction.cacheKey(keySchema.upperRangeFixedSize(keyBytes, timeTo)); final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = cache.range(name, cacheKeyFrom, cacheKeyTo); final HasNextCondition hasNextCondition = keySchema.hasNextCondition(keyBytes, keyBytes, timeFrom, timeTo); final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator = new FilteredCacheIterator( cacheIterator, hasNextCondition, cacheFunction ); return new MergedSortedCacheWindowStoreIterator<>(filteredCacheIterator, underlyingIterator, new StateSerdes<>(serdes.topic(), Serdes.Long(), serdes.valueSerde())); } CachingWindowStore(final WindowStore<Bytes, byte[]> underlying,
final Serde<K> keySerde,
final Serde<V> valueSerde,
final long windowSize,
final long segmentInterval); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); void setFlushListener(CacheFlushListener<Windowed<K>, V> flushListener); @Override synchronized void flush(); @Override void close(); @Override synchronized void put(final K key, final V value); @Override synchronized void put(final K key, final V value, final long timestamp); @Override synchronized WindowStoreIterator<V> fetch(final K key, final long timeFrom, final long timeTo); @Override KeyValueIterator<Windowed<K>, V> fetch(final K from, final K to, final long timeFrom, final long timeTo); }### Answer:
@Test public void shouldFlushEvictedItemsIntoUnderlyingStore() throws Exception { int added = addItemsToCache(); final KeyValueIterator<Bytes, byte[]> iter = underlying.fetch(Bytes.wrap("0".getBytes()), DEFAULT_TIMESTAMP, DEFAULT_TIMESTAMP); final KeyValue<Bytes, byte[]> next = iter.next(); assertEquals(DEFAULT_TIMESTAMP, keySchema.segmentTimestamp(next.key)); assertArrayEquals("0".getBytes(), next.value); assertFalse(iter.hasNext()); assertEquals(added - 1, cache.size()); } |
### Question:
CachingSessionStore extends WrappedStateStore.AbstractStateStore implements SessionStore<K, AGG>, CachedStateStore<Windowed<K>, AGG> { @Override public KeyValueIterator<Windowed<K>, AGG> fetch(final K key) { return findSessions(key, 0, Long.MAX_VALUE); } CachingSessionStore(final SessionStore<Bytes, byte[]> bytesStore,
final Serde<K> keySerde,
final Serde<AGG> aggSerde,
final long segmentInterval); void init(final ProcessorContext context, final StateStore root); KeyValueIterator<Windowed<K>, AGG> findSessions(final K key,
final long earliestSessionEndTime,
final long latestSessionStartTime); @Override KeyValueIterator<Windowed<K>, AGG> findSessions(K keyFrom,
K keyTo,
long earliestSessionEndTime,
long latestSessionStartTime); @Override void remove(final Windowed<K> sessionKey); @Override void put(final Windowed<K> key, AGG value); @Override KeyValueIterator<Windowed<K>, AGG> fetch(final K key); @Override KeyValueIterator<Windowed<K>, AGG> fetch(K from, K to); void flush(); void close(); void setFlushListener(CacheFlushListener<Windowed<K>, AGG> flushListener); }### Answer:
@Test public void shouldFlushItemsToStoreOnEviction() throws Exception { final List<KeyValue<Windowed<String>, Long>> added = addSessionsUntilOverflow("a", "b", "c", "d"); assertEquals(added.size() - 1, cache.size()); final KeyValueIterator<Bytes, byte[]> iterator = underlying.fetch(Bytes.wrap(added.get(0).key.key().getBytes()), 0, 0); final KeyValue<Bytes, byte[]> next = iterator.next(); assertEquals(added.get(0).key, SessionKeySerde.from(next.key.get(), Serdes.String().deserializer(), "dummy")); assertArrayEquals(serdes.rawValue(added.get(0).value), next.value); } |
### Question:
CachingSessionStore extends WrappedStateStore.AbstractStateStore implements SessionStore<K, AGG>, CachedStateStore<Windowed<K>, AGG> { public KeyValueIterator<Windowed<K>, AGG> findSessions(final K key, final long earliestSessionEndTime, final long latestSessionStartTime) { validateStoreOpen(); final Bytes binarySessionId = Bytes.wrap(serdes.rawKey(key)); final Bytes cacheKeyFrom = cacheFunction.cacheKey(keySchema.lowerRangeFixedSize(binarySessionId, earliestSessionEndTime)); final Bytes cacheKeyTo = cacheFunction.cacheKey(keySchema.upperRangeFixedSize(binarySessionId, latestSessionStartTime)); final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = cache.range(cacheName, cacheKeyFrom, cacheKeyTo); final KeyValueIterator<Windowed<Bytes>, byte[]> storeIterator = bytesStore.findSessions( binarySessionId, earliestSessionEndTime, latestSessionStartTime ); final HasNextCondition hasNextCondition = keySchema.hasNextCondition(binarySessionId, binarySessionId, earliestSessionEndTime, latestSessionStartTime); final PeekingKeyValueIterator<Bytes, LRUCacheEntry> filteredCacheIterator = new FilteredCacheIterator(cacheIterator, hasNextCondition, cacheFunction); return new MergedSortedCacheSessionStoreIterator<>(filteredCacheIterator, storeIterator, serdes, cacheFunction); } CachingSessionStore(final SessionStore<Bytes, byte[]> bytesStore,
final Serde<K> keySerde,
final Serde<AGG> aggSerde,
final long segmentInterval); void init(final ProcessorContext context, final StateStore root); KeyValueIterator<Windowed<K>, AGG> findSessions(final K key,
final long earliestSessionEndTime,
final long latestSessionStartTime); @Override KeyValueIterator<Windowed<K>, AGG> findSessions(K keyFrom,
K keyTo,
long earliestSessionEndTime,
long latestSessionStartTime); @Override void remove(final Windowed<K> sessionKey); @Override void put(final Windowed<K> key, AGG value); @Override KeyValueIterator<Windowed<K>, AGG> fetch(final K key); @Override KeyValueIterator<Windowed<K>, AGG> fetch(K from, K to); void flush(); void close(); void setFlushListener(CacheFlushListener<Windowed<K>, AGG> flushListener); }### Answer:
@Test public void shouldQueryItemsInCacheAndStore() throws Exception { final List<KeyValue<Windowed<String>, Long>> added = addSessionsUntilOverflow("a"); final KeyValueIterator<Windowed<String>, Long> iterator = cachingStore.findSessions("a", 0, added.size() * 10); final List<KeyValue<Windowed<String>, Long>> actual = toList(iterator); assertEquals(added, actual); } |