# method2testcases
### Question:
SegmentedCacheFunction implements CacheFunction { @Override public Bytes key(Bytes cacheKey) { return Bytes.wrap(bytesFromCacheKey(cacheKey)); } SegmentedCacheFunction(KeySchema keySchema, long segmentInterval); @Override Bytes key(Bytes cacheKey); @Override Bytes cacheKey(Bytes key); long segmentId(Bytes key); }### Answer:
@Test public void key() throws Exception { assertThat( cacheFunction.key(THE_CACHE_KEY), equalTo(THE_KEY) ); } |
### Question:
SegmentedCacheFunction implements CacheFunction { @Override public Bytes cacheKey(Bytes key) { final byte[] keyBytes = key.get(); ByteBuffer buf = ByteBuffer.allocate(SEGMENT_ID_BYTES + keyBytes.length); buf.putLong(segmentId(key)).put(keyBytes); return Bytes.wrap(buf.array()); } SegmentedCacheFunction(KeySchema keySchema, long segmentInterval); @Override Bytes key(Bytes cacheKey); @Override Bytes cacheKey(Bytes key); long segmentId(Bytes key); }### Answer:
@Test public void cacheKey() throws Exception { final long segmentId = TIMESTAMP / SEGMENT_INTERVAL; final Bytes actualCacheKey = cacheFunction.cacheKey(THE_KEY); final ByteBuffer buffer = ByteBuffer.wrap(actualCacheKey.get()); assertThat(buffer.getLong(), equalTo(segmentId)); byte[] actualKey = new byte[buffer.remaining()]; buffer.get(actualKey); assertThat(Bytes.wrap(actualKey), equalTo(THE_KEY)); } |
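The round trip between `key()` and `cacheKey()` comes down to an 8-byte big-endian segment-id prefix. A minimal standalone sketch of that layout, using only the JDK (`SEGMENT_ID_BYTES` is taken from the snippet above; the class and method names here are illustrative):

```java
import java.nio.ByteBuffer;
import java.util.Arrays;

public class CacheKeyLayoutSketch {
    static final int SEGMENT_ID_BYTES = 8; // one long, as in the snippet above

    // cacheKey(): prefix the raw key bytes with the 8-byte segment id
    static byte[] toCacheKey(long segmentId, byte[] key) {
        return ByteBuffer.allocate(SEGMENT_ID_BYTES + key.length)
                .putLong(segmentId)
                .put(key)
                .array();
    }

    // key(): strip the prefix again to recover the original key bytes
    static byte[] fromCacheKey(byte[] cacheKey) {
        return Arrays.copyOfRange(cacheKey, SEGMENT_ID_BYTES, cacheKey.length);
    }

    public static void main(String[] args) {
        byte[] key = {0xA, 0xB, 0xC};
        byte[] cacheKey = toCacheKey(42L, key);
        System.out.println(Arrays.equals(key, fromCacheKey(cacheKey))); // true
    }
}
```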
### Question:
SegmentedCacheFunction implements CacheFunction { int compareSegmentedKeys(Bytes cacheKey, Bytes storeKey) { long storeSegmentId = segmentId(storeKey); long cacheSegmentId = ByteBuffer.wrap(cacheKey.get()).getLong(); final int segmentCompare = Long.compare(cacheSegmentId, storeSegmentId); if (segmentCompare == 0) { byte[] cacheKeyBytes = cacheKey.get(); byte[] storeKeyBytes = storeKey.get(); return Bytes.BYTES_LEXICO_COMPARATOR.compare( cacheKeyBytes, SEGMENT_ID_BYTES, cacheKeyBytes.length - SEGMENT_ID_BYTES, storeKeyBytes, 0, storeKeyBytes.length ); } else { return segmentCompare; } } SegmentedCacheFunction(KeySchema keySchema, long segmentInterval); @Override Bytes key(Bytes cacheKey); @Override Bytes cacheKey(Bytes key); long segmentId(Bytes key); }### Answer:
@Test public void compareSegmentedKeys() throws Exception { assertThat( "same key in same segment should be ranked the same", cacheFunction.compareSegmentedKeys( cacheFunction.cacheKey(THE_KEY), THE_KEY ) == 0 ); final Bytes sameKeyInPriorSegment = WindowStoreUtils.toBinaryKey(new byte[]{0xA, 0xB, 0xC}, 1234, 42); assertThat( "same keys in different segments should be ordered according to segment", cacheFunction.compareSegmentedKeys( cacheFunction.cacheKey(sameKeyInPriorSegment), THE_KEY ) < 0 ); assertThat( "same keys in different segments should be ordered according to segment", cacheFunction.compareSegmentedKeys( cacheFunction.cacheKey(THE_KEY), sameKeyInPriorSegment ) > 0 ); final Bytes lowerKeyInSameSegment = WindowStoreUtils.toBinaryKey(new byte[]{0xA, 0xB, 0xB}, TIMESTAMP - 1, 0); assertThat( "different keys in same segments should be ordered according to key", cacheFunction.compareSegmentedKeys( cacheFunction.cacheKey(THE_KEY), lowerKeyInSameSegment ) > 0 ); assertThat( "different keys in same segments should be ordered according to key", cacheFunction.compareSegmentedKeys( cacheFunction.cacheKey(lowerKeyInSameSegment), THE_KEY ) < 0 ); } |
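The test above exercises a two-level ordering: segment id first, key bytes second. A self-contained sketch of that rule over raw arrays, using unsigned byte comparison as `Bytes.BYTES_LEXICO_COMPARATOR` does (class and method names are illustrative):

```java
import java.nio.ByteBuffer;

public class SegmentedKeyOrderSketch {
    // both arguments are 8-byte segment ids followed by key bytes
    static int compare(byte[] cacheKeyA, byte[] cacheKeyB) {
        int bySegment = Long.compare(ByteBuffer.wrap(cacheKeyA).getLong(),
                                     ByteBuffer.wrap(cacheKeyB).getLong());
        if (bySegment != 0) {
            return bySegment; // different segments: the segment id decides
        }
        // same segment: unsigned lexicographic comparison of the key bytes
        int minLength = Math.min(cacheKeyA.length, cacheKeyB.length);
        for (int i = 8; i < minLength; i++) {
            int byByte = Integer.compare(cacheKeyA[i] & 0xff, cacheKeyB[i] & 0xff);
            if (byByte != 0) {
                return byByte;
            }
        }
        return Integer.compare(cacheKeyA.length, cacheKeyB.length);
    }

    public static void main(String[] args) {
        byte[] seg1Key = ByteBuffer.allocate(9).putLong(1L).put((byte) 0xA).array();
        byte[] seg2Key = ByteBuffer.allocate(9).putLong(2L).put((byte) 0xA).array();
        System.out.println(compare(seg1Key, seg2Key) < 0); // true: earlier segment sorts first
    }
}
```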
### Question:
ThreadCache { public LRUCacheEntry delete(final String namespace, final Bytes key) { final NamedCache cache = getCache(namespace); if (cache == null) { return null; } return cache.delete(key); } ThreadCache(final String name, long maxCacheSizeBytes, final StreamsMetrics metrics); long puts(); long gets(); long evicts(); long flushes(); void addDirtyEntryFlushListener(final String namespace, DirtyEntryFlushListener listener); void flush(final String namespace); LRUCacheEntry get(final String namespace, Bytes key); void put(final String namespace, Bytes key, LRUCacheEntry value); LRUCacheEntry putIfAbsent(final String namespace, Bytes key, LRUCacheEntry value); void putAll(final String namespace, final List<KeyValue<Bytes, LRUCacheEntry>> entries); LRUCacheEntry delete(final String namespace, final Bytes key); MemoryLRUCacheBytesIterator range(final String namespace, final Bytes from, final Bytes to); MemoryLRUCacheBytesIterator all(final String namespace); long size(); }### Answer:
@Test public void shouldNotBlowUpOnNonExistentNamespaceWhenDeleting() throws Exception { final ThreadCache cache = new ThreadCache("testCache", 10000L, new MockStreamsMetrics(new Metrics())); assertNull(cache.delete("name", Bytes.wrap(new byte[]{1}))); } |
### Question:
ThreadCache { public MemoryLRUCacheBytesIterator range(final String namespace, final Bytes from, final Bytes to) { final NamedCache cache = getCache(namespace); if (cache == null) { return new MemoryLRUCacheBytesIterator(Collections.<Bytes>emptyIterator(), new NamedCache(namespace, this.metrics)); } return new MemoryLRUCacheBytesIterator(cache.keyRange(from, to), cache); } ThreadCache(final String name, long maxCacheSizeBytes, final StreamsMetrics metrics); long puts(); long gets(); long evicts(); long flushes(); void addDirtyEntryFlushListener(final String namespace, DirtyEntryFlushListener listener); void flush(final String namespace); LRUCacheEntry get(final String namespace, Bytes key); void put(final String namespace, Bytes key, LRUCacheEntry value); LRUCacheEntry putIfAbsent(final String namespace, Bytes key, LRUCacheEntry value); void putAll(final String namespace, final List<KeyValue<Bytes, LRUCacheEntry>> entries); LRUCacheEntry delete(final String namespace, final Bytes key); MemoryLRUCacheBytesIterator range(final String namespace, final Bytes from, final Bytes to); MemoryLRUCacheBytesIterator all(final String namespace); long size(); }### Answer:
@Test(expected = NoSuchElementException.class) public void shouldThrowIfNoPeekNextKey() throws Exception { final ThreadCache cache = new ThreadCache("testCache", 10000L, new MockStreamsMetrics(new Metrics())); final ThreadCache.MemoryLRUCacheBytesIterator iterator = cache.range("", Bytes.wrap(new byte[]{0}), Bytes.wrap(new byte[]{1})); iterator.peekNextKey(); }
@Test public void shouldReturnFalseIfNoNextKey() throws Exception { final ThreadCache cache = new ThreadCache("testCache", 10000L, new MockStreamsMetrics(new Metrics())); final ThreadCache.MemoryLRUCacheBytesIterator iterator = cache.range("", Bytes.wrap(new byte[]{0}), Bytes.wrap(new byte[]{1})); assertFalse(iterator.hasNext()); } |
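Both tests pin down the same contract for a namespace that holds no entries: `range()` hands back an iterator that behaves like an empty one. The JDK analogue of that contract, as a sketch:

```java
import java.util.Collections;
import java.util.Iterator;
import java.util.NoSuchElementException;

public class EmptyIteratorContractSketch {
    public static void main(String[] args) {
        Iterator<byte[]> empty = Collections.emptyIterator();
        System.out.println(empty.hasNext()); // false, like hasNext() above
        try {
            empty.next(); // any element access must throw, like peekNextKey() above
        } catch (NoSuchElementException expected) {
            System.out.println("NoSuchElementException, as expected");
        }
    }
}
```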
### Question:
Segments { long segmentId(long timestamp) { return timestamp / segmentInterval; } Segments(final String name, final long retentionPeriod, final int numSegments); void close(); }### Answer:
@Test public void shouldGetSegmentIdsFromTimestamp() throws Exception { assertEquals(0, segments.segmentId(0)); assertEquals(1, segments.segmentId(60000)); assertEquals(2, segments.segmentId(120000)); assertEquals(3, segments.segmentId(180000)); }
@Test public void shouldBaseSegmentIntervalOnRetentionAndNumSegments() throws Exception { final Segments segments = new Segments("test", 8 * 60 * 1000, 5); assertEquals(0, segments.segmentId(0)); assertEquals(0, segments.segmentId(60000)); assertEquals(1, segments.segmentId(120000)); } |
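The expected ids in `shouldBaseSegmentIntervalOnRetentionAndNumSegments` are consistent with an interval of `retentionPeriod / (numSegments - 1)`; that formula is inferred from the assertions, not quoted from the source. The arithmetic, spelled out:

```java
public class SegmentIntervalSketch {
    public static void main(String[] args) {
        long retentionPeriod = 8 * 60 * 1000L; // 480,000 ms, as in the test
        int numSegments = 5;
        long segmentInterval = retentionPeriod / (numSegments - 1); // 120,000 ms

        System.out.println(0L / segmentInterval);       // 0, matching segmentId(0)
        System.out.println(60_000L / segmentInterval);  // 0, matching segmentId(60000)
        System.out.println(120_000L / segmentInterval); // 1, matching segmentId(120000)
    }
}
```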
### Question:
Segments { String segmentName(long segmentId) { return name + "-" + formatter.format(new Date(segmentId * segmentInterval)); } Segments(final String name, final long retentionPeriod, final int numSegments); void close(); }### Answer:
@Test public void shouldGetSegmentNameFromId() throws Exception { assertEquals("test-197001010000", segments.segmentName(0)); assertEquals("test-197001010001", segments.segmentName(1)); assertEquals("test-197001010002", segments.segmentName(2)); } |
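The expected strings imply the naming scheme: the segment's start time (`segmentId * segmentInterval`, as epoch millis) formatted as `yyyyMMddHHmm` in UTC and appended to the store name. The pattern and time zone are inferred from the expected values; a standalone sketch assuming the fixture's 60,000 ms interval:

```java
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class SegmentNameSketch {
    public static void main(String[] args) {
        SimpleDateFormat formatter = new SimpleDateFormat("yyyyMMddHHmm");
        formatter.setTimeZone(TimeZone.getTimeZone("UTC"));
        long segmentInterval = 60_000L; // the default fixture's interval
        for (long segmentId = 0; segmentId < 3; segmentId++) {
            System.out.println("test-" + formatter.format(new Date(segmentId * segmentInterval)));
        }
        // prints test-197001010000, test-197001010001, test-197001010002
    }
}
```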
### Question:
Segments { Segment getOrCreateSegment(final long segmentId, final ProcessorContext context) { if (segmentId > maxSegmentId - numSegments) { final long key = segmentId % numSegments; final Segment segment = segments.get(key); if (!isSegment(segment, segmentId)) { cleanup(segmentId); } Segment newSegment = new Segment(segmentName(segmentId), name, segmentId); Segment previousSegment = segments.putIfAbsent(key, newSegment); if (previousSegment == null) { newSegment.openDB(context); maxSegmentId = segmentId > maxSegmentId ? segmentId : maxSegmentId; if (minSegmentId == Long.MAX_VALUE) { minSegmentId = maxSegmentId; } } return previousSegment == null ? newSegment : previousSegment; } else { return null; } } Segments(final String name, final long retentionPeriod, final int numSegments); void close(); }### Answer:
@Test public void shouldCreateSegments() throws Exception { final Segment segment1 = segments.getOrCreateSegment(0, context); final Segment segment2 = segments.getOrCreateSegment(1, context); final Segment segment3 = segments.getOrCreateSegment(2, context); assertTrue(new File(context.stateDir(), "test/test-197001010000").isDirectory()); assertTrue(new File(context.stateDir(), "test/test-197001010001").isDirectory()); assertTrue(new File(context.stateDir(), "test/test-197001010002").isDirectory()); assertEquals(true, segment1.isOpen()); assertEquals(true, segment2.isOpen()); assertEquals(true, segment3.isOpen()); }
@Test public void shouldNotCreateSegmentThatIsAlreadyExpired() throws Exception { segments.getOrCreateSegment(7, context); assertNull(segments.getOrCreateSegment(0, context)); assertFalse(new File(context.stateDir(), "test/test-197001010000").exists()); }
@Test public void shouldCleanupSegmentsThatHaveExpired() throws Exception { final Segment segment1 = segments.getOrCreateSegment(0, context); final Segment segment2 = segments.getOrCreateSegment(1, context); final Segment segment3 = segments.getOrCreateSegment(7, context); assertFalse(segment1.isOpen()); assertFalse(segment2.isOpen()); assertTrue(segment3.isOpen()); assertFalse(new File(context.stateDir(), "test/test-197001010000").exists()); assertFalse(new File(context.stateDir(), "test/test-197001010001").exists()); assertTrue(new File(context.stateDir(), "test/test-197001010007").exists()); }
@Test public void shouldRollSegments() throws Exception { segments.getOrCreateSegment(0, context); verifyCorrectSegments(0, 1); segments.getOrCreateSegment(1, context); verifyCorrectSegments(0, 2); segments.getOrCreateSegment(2, context); verifyCorrectSegments(0, 3); segments.getOrCreateSegment(3, context); verifyCorrectSegments(0, 4); segments.getOrCreateSegment(4, context); verifyCorrectSegments(0, 5); segments.getOrCreateSegment(5, context); verifyCorrectSegments(1, 5); segments.getOrCreateSegment(6, context); verifyCorrectSegments(2, 5); } |
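`shouldRollSegments` traces the slot-reuse scheme visible in `getOrCreateSegment`: live segments occupy `numSegments` map slots keyed by `segmentId % numSegments`, so once five segments exist, each new one reuses a slot and evicts the oldest. The slot arithmetic, spelled out:

```java
public class SegmentSlotSketch {
    public static void main(String[] args) {
        int numSegments = 5;
        for (long segmentId = 0; segmentId <= 6; segmentId++) {
            // segment 5 lands in slot 0 and evicts segment 0; segment 6 evicts 1, etc.
            System.out.println("segment " + segmentId + " -> slot " + (segmentId % numSegments));
        }
    }
}
```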
### Question:
CachingKeyValueStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<K, V>, CachedStateStore<K, V> { @Override public KeyValueIterator<K, V> all() { validateStoreOpen(); final KeyValueIterator<Bytes, byte[]> storeIterator = new DelegatingPeekingKeyValueIterator<>(this.name(), underlying.all()); final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = cache.all(cacheName); return new MergedSortedCacheKeyValueStoreIterator<>(cacheIterator, storeIterator, serdes); } CachingKeyValueStore(final KeyValueStore<Bytes, byte[]> underlying,
final Serde<K> keySerde,
final Serde<V> valueSerde); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); void setFlushListener(final CacheFlushListener<K, V> flushListener); @Override synchronized void flush(); @Override void close(); @Override boolean persistent(); @Override boolean isOpen(); @Override synchronized V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); @Override synchronized long approximateNumEntries(); @Override synchronized void put(final K key, final V value); @Override synchronized V putIfAbsent(final K key, final V value); @Override synchronized void putAll(final List<KeyValue<K, V>> entries); @Override synchronized V delete(final K key); @Override StateStore inner(); }### Answer:
@Test public void shouldIterateAllStoredItems() throws Exception { int items = addItemsToCache(); final KeyValueIterator<String, String> all = store.all(); final List<String> results = new ArrayList<>(); while (all.hasNext()) { results.add(all.next().key); } assertEquals(items, results.size()); } |
### Question:
CachingKeyValueStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<K, V>, CachedStateStore<K, V> { @Override public KeyValueIterator<K, V> range(final K from, final K to) { validateStoreOpen(); final Bytes origFrom = Bytes.wrap(serdes.rawKey(from)); final Bytes origTo = Bytes.wrap(serdes.rawKey(to)); final KeyValueIterator<Bytes, byte[]> storeIterator = underlying.range(origFrom, origTo); final ThreadCache.MemoryLRUCacheBytesIterator cacheIterator = cache.range(cacheName, origFrom, origTo); return new MergedSortedCacheKeyValueStoreIterator<>(cacheIterator, storeIterator, serdes); } CachingKeyValueStore(final KeyValueStore<Bytes, byte[]> underlying,
final Serde<K> keySerde,
final Serde<V> valueSerde); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); void setFlushListener(final CacheFlushListener<K, V> flushListener); @Override synchronized void flush(); @Override void close(); @Override boolean persistent(); @Override boolean isOpen(); @Override synchronized V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); @Override synchronized long approximateNumEntries(); @Override synchronized void put(final K key, final V value); @Override synchronized V putIfAbsent(final K key, final V value); @Override synchronized void putAll(final List<KeyValue<K, V>> entries); @Override synchronized V delete(final K key); @Override StateStore inner(); }### Answer:
@Test public void shouldIterateOverRange() throws Exception { int items = addItemsToCache(); final KeyValueIterator<String, String> range = store.range(String.valueOf(0), String.valueOf(items)); final List<String> results = new ArrayList<>(); while (range.hasNext()) { results.add(range.next().key); } assertEquals(items, results.size()); } |
### Question:
CachingKeyValueStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<K, V>, CachedStateStore<K, V> { @Override public synchronized V get(final K key) { validateStoreOpen(); if (key == null) { return null; } final byte[] rawKey = serdes.rawKey(key); return get(rawKey); } CachingKeyValueStore(final KeyValueStore<Bytes, byte[]> underlying,
final Serde<K> keySerde,
final Serde<V> valueSerde); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); void setFlushListener(final CacheFlushListener<K, V> flushListener); @Override synchronized void flush(); @Override void close(); @Override boolean persistent(); @Override boolean isOpen(); @Override synchronized V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); @Override synchronized long approximateNumEntries(); @Override synchronized void put(final K key, final V value); @Override synchronized V putIfAbsent(final K key, final V value); @Override synchronized void putAll(final List<KeyValue<K, V>> entries); @Override synchronized V delete(final K key); @Override StateStore inner(); }### Answer:
@Test public void shouldReturnNullIfKeyIsNull() throws Exception { assertNull(store.get(null)); } |
### Question:
SegmentIterator implements KeyValueIterator<Bytes, byte[]> { @Override public Bytes peekNextKey() { if (!hasNext()) { throw new NoSuchElementException(); } return currentIterator.peekNextKey(); } SegmentIterator(final Iterator<Segment> segments,
final HasNextCondition hasNextCondition,
final Bytes from,
final Bytes to); void close(); @Override Bytes peekNextKey(); @Override boolean hasNext(); KeyValue<Bytes, byte[]> next(); void remove(); }### Answer:
@Test(expected = NoSuchElementException.class) public void shouldThrowNoSuchElementOnPeekNextKeyIfNoNext() throws Exception { iterator = new SegmentIterator(Arrays.asList(segmentOne, segmentTwo).iterator(), hasNextCondition, Bytes.wrap("f".getBytes()), Bytes.wrap("h".getBytes())); iterator.peekNextKey(); } |
### Question:
SegmentIterator implements KeyValueIterator<Bytes, byte[]> { public KeyValue<Bytes, byte[]> next() { if (!hasNext()) { throw new NoSuchElementException(); } return currentIterator.next(); } SegmentIterator(final Iterator<Segment> segments,
final HasNextCondition hasNextCondition,
final Bytes from,
final Bytes to); void close(); @Override Bytes peekNextKey(); @Override boolean hasNext(); KeyValue<Bytes, byte[]> next(); void remove(); }### Answer:
@Test(expected = NoSuchElementException.class) public void shouldThrowNoSuchElementOnNextIfNoNext() throws Exception { iterator = new SegmentIterator(Arrays.asList(segmentOne, segmentTwo).iterator(), hasNextCondition, Bytes.wrap("f".getBytes()), Bytes.wrap("h".getBytes())); iterator.next(); } |
### Question:
FilteredCacheIterator implements PeekingKeyValueIterator<Bytes, LRUCacheEntry> { @Override public void remove() { throw new UnsupportedOperationException(); } FilteredCacheIterator(final PeekingKeyValueIterator<Bytes, LRUCacheEntry> cacheIterator,
final HasNextCondition hasNextCondition,
final CacheFunction cacheFunction); @Override void close(); @Override Bytes peekNextKey(); @Override boolean hasNext(); @Override KeyValue<Bytes, LRUCacheEntry> next(); @Override void remove(); @Override KeyValue<Bytes, LRUCacheEntry> peekNext(); }### Answer:
@Test(expected = UnsupportedOperationException.class) public void shouldThrowUnsupportedOperationExceptionOnRemove() throws Exception { allIterator.remove(); } |
### Question:
ChangeLoggingSegmentedBytesStore extends WrappedStateStore.AbstractStateStore implements SegmentedBytesStore { @Override public KeyValueIterator<Bytes, byte[]> fetch(final Bytes key, final long from, final long to) { return bytesStore.fetch(key, from, to); } ChangeLoggingSegmentedBytesStore(final SegmentedBytesStore bytesStore); @Override KeyValueIterator<Bytes, byte[]> fetch(final Bytes key, final long from, final long to); @Override KeyValueIterator<Bytes, byte[]> fetch(Bytes keyFrom, Bytes keyTo, long from, long to); @Override void remove(final Bytes key); @Override void put(final Bytes key, final byte[] value); @Override byte[] get(final Bytes key); @Override @SuppressWarnings("unchecked") void init(final ProcessorContext context, final StateStore root); }### Answer:
@Test public void shouldDelegateToUnderlyingStoreWhenFetching() throws Exception { store.fetch(Bytes.wrap(new byte[0]), 1, 1); assertTrue(bytesStore.fetchCalled); } |
### Question:
WrappingStoreProvider implements StateStoreProvider { public <T> List<T> stores(final String storeName, QueryableStoreType<T> type) { final List<T> allStores = new ArrayList<>(); for (StateStoreProvider provider : storeProviders) { final List<T> stores = provider.stores(storeName, type); allStores.addAll(stores); } if (allStores.isEmpty()) { throw new InvalidStateStoreException("the state store, " + storeName + ", may have migrated to another instance."); } return allStores; } WrappingStoreProvider(final List<StateStoreProvider> storeProviders); List<T> stores(final String storeName, QueryableStoreType<T> type); }### Answer:
@Test public void shouldFindKeyValueStores() throws Exception { List<ReadOnlyKeyValueStore<String, String>> results = wrappingStoreProvider.stores("kv", QueryableStoreTypes.<String, String>keyValueStore()); assertEquals(2, results.size()); }
@Test public void shouldFindWindowStores() throws Exception { final List<ReadOnlyWindowStore<Object, Object>> windowStores = wrappingStoreProvider.stores("window", windowStore()); assertEquals(2, windowStores.size()); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowInvalidStoreExceptionIfNoStoreOfTypeFound() throws Exception { wrappingStoreProvider.stores("doesn't exist", QueryableStoreTypes.keyValueStore()); } |
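The three tests pin down the fan-out rule: gather matches from every provider, and treat "no matches anywhere" as an error because the store may have migrated. A self-contained sketch of that rule, written against a hypothetical `Provider` interface (the real code throws `InvalidStateStoreException`; a plain `RuntimeException` stands in here to keep the sketch dependency-free):

```java
import java.util.ArrayList;
import java.util.List;

public class FanOutSketch {
    // hypothetical stand-in for StateStoreProvider
    interface Provider<T> {
        List<T> stores(String storeName);
    }

    static <T> List<T> stores(List<Provider<T>> providers, String storeName) {
        List<T> allStores = new ArrayList<>();
        for (Provider<T> provider : providers) {
            allStores.addAll(provider.stores(storeName)); // collect from every provider
        }
        if (allStores.isEmpty()) {
            // the real code throws InvalidStateStoreException here
            throw new RuntimeException("the state store, " + storeName + ", may have migrated to another instance.");
        }
        return allStores;
    }
}
```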
### Question:
ChangeLoggingKeyValueBytesStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<Bytes, byte[]> { @Override public byte[] putIfAbsent(final Bytes key, final byte[] value) { final byte[] previous = get(key); if (previous == null) { put(key, value); } return previous; } ChangeLoggingKeyValueBytesStore(final KeyValueStore<Bytes, byte[]> inner); @Override void init(final ProcessorContext context, final StateStore root); @Override long approximateNumEntries(); @Override void put(final Bytes key, final byte[] value); @Override byte[] putIfAbsent(final Bytes key, final byte[] value); @Override void putAll(final List<KeyValue<Bytes, byte[]>> entries); @Override byte[] delete(final Bytes key); @Override byte[] get(final Bytes key); @Override KeyValueIterator<Bytes, byte[]> range(final Bytes from, final Bytes to); @Override KeyValueIterator<Bytes, byte[]> all(); }### Answer:
@Test public void shouldReturnNullOnPutIfAbsentWhenNoPreviousValue() throws Exception { assertThat(store.putIfAbsent(hi, there), is(nullValue())); } |
### Question:
ChangeLoggingKeyValueBytesStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<Bytes, byte[]> { @Override public byte[] get(final Bytes key) { return inner.get(key); } ChangeLoggingKeyValueBytesStore(final KeyValueStore<Bytes, byte[]> inner); @Override void init(final ProcessorContext context, final StateStore root); @Override long approximateNumEntries(); @Override void put(final Bytes key, final byte[] value); @Override byte[] putIfAbsent(final Bytes key, final byte[] value); @Override void putAll(final List<KeyValue<Bytes, byte[]>> entries); @Override byte[] delete(final Bytes key); @Override byte[] get(final Bytes key); @Override KeyValueIterator<Bytes, byte[]> range(final Bytes from, final Bytes to); @Override KeyValueIterator<Bytes, byte[]> all(); }### Answer:
@Test public void shouldReturnNullOnGetWhenDoesntExist() throws Exception { assertThat(store.get(hello), is(nullValue())); } |
### Question:
WindowKeySchema implements RocksDBSegmentedBytesStore.KeySchema { @Override public Bytes upperRange(final Bytes key, final long to) { final byte[] maxSuffix = ByteBuffer.allocate(SUFFIX_SIZE) .putLong(to) .putInt(Integer.MAX_VALUE) .array(); return OrderedBytes.upperRange(key, maxSuffix); } @Override void init(final String topic); @Override Bytes upperRange(final Bytes key, final long to); @Override Bytes lowerRange(final Bytes key, final long from); @Override Bytes lowerRangeFixedSize(final Bytes key, final long from); @Override Bytes upperRangeFixedSize(final Bytes key, final long to); @Override long segmentTimestamp(final Bytes key); @Override HasNextCondition hasNextCondition(final Bytes binaryKeyFrom, final Bytes binaryKeyTo, final long from, final long to); @Override List<Segment> segmentsToSearch(final Segments segments, final long from, final long to); }### Answer:
@Test public void testUpperBoundWithLargeTimestamps() throws Exception { Bytes upper = windowKeySchema.upperRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), Long.MAX_VALUE); assertThat( "shorter key with max timestamp should be in range", upper.compareTo( WindowStoreUtils.toBinaryKey( new byte[]{0xA}, Long.MAX_VALUE, Integer.MAX_VALUE ) ) >= 0 ); assertThat( "shorter key with max timestamp should be in range", upper.compareTo( WindowStoreUtils.toBinaryKey( new byte[]{0xA, 0xB}, Long.MAX_VALUE, Integer.MAX_VALUE ) ) >= 0 ); assertThat(upper, equalTo(WindowStoreUtils.toBinaryKey(new byte[]{0xA}, Long.MAX_VALUE, Integer.MAX_VALUE))); }
@Test public void testUpperBoundWithKeyBytesLargerThanFirstTimestampByte() throws Exception { Bytes upper = windowKeySchema.upperRange(Bytes.wrap(new byte[]{0xA, (byte) 0x8F, (byte) 0x9F}), Long.MAX_VALUE); assertThat( "shorter key with max timestamp should be in range", upper.compareTo( WindowStoreUtils.toBinaryKey( new byte[]{0xA, (byte) 0x8F}, Long.MAX_VALUE, Integer.MAX_VALUE ) ) >= 0 ); assertThat(upper, equalTo(WindowStoreUtils.toBinaryKey(new byte[]{0xA, (byte) 0x8F, (byte) 0x9F}, Long.MAX_VALUE, Integer.MAX_VALUE))); }
@Test public void testUpperBoundWithKeyBytesLargerAndSmallerThanFirstTimestampByte() throws Exception { Bytes upper = windowKeySchema.upperRange(Bytes.wrap(new byte[]{0xC, 0xC, 0x9}), 0x0AffffffffffffffL); assertThat( "shorter key with max timestamp should be in range", upper.compareTo( WindowStoreUtils.toBinaryKey( new byte[]{0xC, 0xC}, 0x0AffffffffffffffL, Integer.MAX_VALUE ) ) >= 0 ); assertThat(upper, equalTo(WindowStoreUtils.toBinaryKey(new byte[]{0xC, 0xC}, 0x0AffffffffffffffL, Integer.MAX_VALUE))); }
@Test public void testUpperBoundWithZeroTimestamp() throws Exception { Bytes upper = windowKeySchema.upperRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), 0); assertThat(upper, equalTo(WindowStoreUtils.toBinaryKey(new byte[]{0xA, 0xB, 0xC}, 0, Integer.MAX_VALUE))); } |
### Question:
WindowKeySchema implements RocksDBSegmentedBytesStore.KeySchema { @Override public Bytes lowerRange(final Bytes key, final long from) { return OrderedBytes.lowerRange(key, MIN_SUFFIX); } @Override void init(final String topic); @Override Bytes upperRange(final Bytes key, final long to); @Override Bytes lowerRange(final Bytes key, final long from); @Override Bytes lowerRangeFixedSize(final Bytes key, final long from); @Override Bytes upperRangeFixedSize(final Bytes key, final long to); @Override long segmentTimestamp(final Bytes key); @Override HasNextCondition hasNextCondition(final Bytes binaryKeyFrom, final Bytes binaryKeyTo, final long from, final long to); @Override List<Segment> segmentsToSearch(final Segments segments, final long from, final long to); }### Answer:
@Test public void testLowerBoundWithZeroTimestamp() throws Exception { Bytes lower = windowKeySchema.lowerRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), 0); assertThat(lower, equalTo(WindowStoreUtils.toBinaryKey(new byte[]{0xA, 0xB, 0xC}, 0, 0))); }
@Test public void testLowerBoundWithNonZeroTimestamp() throws Exception { Bytes lower = windowKeySchema.lowerRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), 42); assertThat(lower, equalTo(WindowStoreUtils.toBinaryKey(new byte[]{0xA, 0xB, 0xC}, 0, 0))); }
@Test public void testLowerBoundMatchesTrailingZeros() throws Exception { Bytes lower = windowKeySchema.lowerRange(Bytes.wrap(new byte[]{0xA, 0xB, 0xC}), Long.MAX_VALUE - 1); assertThat( "appending zeros to key should still be in range", lower.compareTo( WindowStoreUtils.toBinaryKey( new byte[]{0xA, 0xB, 0xC, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, Long.MAX_VALUE - 1, 0 ) ) < 0 ); assertThat(lower, equalTo(WindowStoreUtils.toBinaryKey(new byte[]{0xA, 0xB, 0xC}, 0, 0))); } |
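These range tests all lean on the binary window-key layout implied by `upperRange`'s suffix (`putLong` then `putInt`, so 12 suffix bytes): raw key bytes, then an 8-byte timestamp, then a 4-byte sequence number. A standalone sketch of `WindowStoreUtils.toBinaryKey` under that assumption:

```java
import java.nio.ByteBuffer;

public class WindowKeyLayoutSketch {
    static final int SUFFIX_SIZE = 8 + 4; // timestamp + sequence number

    static byte[] toBinaryKey(byte[] key, long timestamp, int seqnum) {
        return ByteBuffer.allocate(key.length + SUFFIX_SIZE)
                .put(key)
                .putLong(timestamp)
                .putInt(seqnum)
                .array();
    }

    public static void main(String[] args) {
        // the lower range pads with the minimal suffix: timestamp 0, seqnum 0
        byte[] lower = toBinaryKey(new byte[]{0xA, 0xB, 0xC}, 0L, 0);
        System.out.println(lower.length); // 3 key bytes + 12 suffix bytes = 15
    }
}
```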
### Question:
DelegatingPeekingKeyValueIterator implements KeyValueIterator<K, V>, PeekingKeyValueIterator<K, V> { @Override public synchronized KeyValue<K, V> next() { if (!hasNext()) { throw new NoSuchElementException(); } final KeyValue<K, V> result = next; next = null; return result; } DelegatingPeekingKeyValueIterator(final String storeName, final KeyValueIterator<K, V> underlying); @Override synchronized K peekNextKey(); @Override synchronized void close(); @Override synchronized boolean hasNext(); @Override synchronized KeyValue<K, V> next(); @Override void remove(); @Override KeyValue<K, V> peekNext(); }### Answer:
@Test(expected = NoSuchElementException.class) public void shouldThrowNoSuchElementWhenNoMoreItemsLeftAndNextCalled() throws Exception { final DelegatingPeekingKeyValueIterator<String, String> peekingIterator = new DelegatingPeekingKeyValueIterator<>(name, store.all()); peekingIterator.next(); } |
### Question:
DelegatingPeekingKeyValueIterator implements KeyValueIterator<K, V>, PeekingKeyValueIterator<K, V> { @Override public synchronized K peekNextKey() { if (!hasNext()) { throw new NoSuchElementException(); } return next.key; } DelegatingPeekingKeyValueIterator(final String storeName, final KeyValueIterator<K, V> underlying); @Override synchronized K peekNextKey(); @Override synchronized void close(); @Override synchronized boolean hasNext(); @Override synchronized KeyValue<K, V> next(); @Override void remove(); @Override KeyValue<K, V> peekNext(); }### Answer:
@Test(expected = NoSuchElementException.class) public void shouldThrowNoSuchElementWhenNoMoreItemsLeftAndPeekNextCalled() throws Exception { final DelegatingPeekingKeyValueIterator<String, String> peekingIterator = new DelegatingPeekingKeyValueIterator<>(name, store.all()); peekingIterator.peekNextKey(); } |
### Question:
CompositeReadOnlyKeyValueStore implements ReadOnlyKeyValueStore<K, V> { @Override public V get(final K key) { final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType); for (ReadOnlyKeyValueStore<K, V> store : stores) { try { final V result = store.get(key); if (result != null) { return result; } } catch (InvalidStateStoreException e) { throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata."); } } return null; } CompositeReadOnlyKeyValueStore(final StateStoreProvider storeProvider,
final QueryableStoreType<ReadOnlyKeyValueStore<K, V>> storeType,
final String storeName); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); @Override long approximateNumEntries(); }### Answer:
@Test public void shouldReturnNullIfKeyDoesntExist() throws Exception { assertNull(theStore.get("whatever")); }
@Test public void shouldReturnValueIfExists() throws Exception { stubOneUnderlying.put("key", "value"); assertEquals("value", theStore.get("key")); }
@Test public void shouldNotGetValuesFromOtherStores() throws Exception { otherUnderlyingStore.put("otherKey", "otherValue"); assertNull(theStore.get("otherKey")); }
@SuppressWarnings("unchecked") @Test public void shouldFindValueForKeyWhenMultiStores() throws Exception { final KeyValueStore<String, String> cache = newStoreInstance(); stubProviderTwo.addStore(storeName, cache); cache.put("key-two", "key-two-value"); stubOneUnderlying.put("key-one", "key-one-value"); assertEquals("key-two-value", theStore.get("key-two")); assertEquals("key-one-value", theStore.get("key-one")); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowInvalidStoreExceptionDuringRebalance() throws Exception { rebalancing().get("anything"); } |
### Question:
CompositeReadOnlyKeyValueStore implements ReadOnlyKeyValueStore<K, V> { @Override public KeyValueIterator<K, V> range(final K from, final K to) { final NextIteratorFunction<K, V> nextIteratorFunction = new NextIteratorFunction<K, V>() { @Override public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) { try { return store.range(from, to); } catch (InvalidStateStoreException e) { throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata."); } } }; final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType); return new DelegatingPeekingKeyValueIterator<>(storeName, new CompositeKeyValueIterator(stores.iterator(), nextIteratorFunction)); } CompositeReadOnlyKeyValueStore(final StateStoreProvider storeProvider,
final QueryableStoreType<ReadOnlyKeyValueStore<K, V>> storeType,
final String storeName); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); @Override long approximateNumEntries(); }### Answer:
@Test public void shouldSupportRange() throws Exception { stubOneUnderlying.put("a", "a"); stubOneUnderlying.put("b", "b"); stubOneUnderlying.put("c", "c"); final List<KeyValue<String, String>> results = toList(theStore.range("a", "b")); assertTrue(results.contains(new KeyValue<>("a", "a"))); assertTrue(results.contains(new KeyValue<>("b", "b"))); assertEquals(2, results.size()); }
@SuppressWarnings("unchecked") @Test public void shouldSupportRangeAcrossMultipleKVStores() throws Exception { final KeyValueStore<String, String> cache = newStoreInstance(); stubProviderTwo.addStore(storeName, cache); stubOneUnderlying.put("a", "a"); stubOneUnderlying.put("b", "b"); stubOneUnderlying.put("z", "z"); cache.put("c", "c"); cache.put("d", "d"); cache.put("x", "x"); final List<KeyValue<String, String>> results = toList(theStore.range("a", "e")); assertTrue(results.contains(new KeyValue<>("a", "a"))); assertTrue(results.contains(new KeyValue<>("b", "b"))); assertTrue(results.contains(new KeyValue<>("c", "c"))); assertTrue(results.contains(new KeyValue<>("d", "d"))); assertEquals(4, results.size()); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowInvalidStoreExceptionOnRangeDuringRebalance() throws Exception { rebalancing().range("anything", "something"); } |
### Question:
CompositeReadOnlyKeyValueStore implements ReadOnlyKeyValueStore<K, V> { @Override public KeyValueIterator<K, V> all() { final NextIteratorFunction<K, V> nextIteratorFunction = new NextIteratorFunction<K, V>() { @Override public KeyValueIterator<K, V> apply(final ReadOnlyKeyValueStore<K, V> store) { try { return store.all(); } catch (InvalidStateStoreException e) { throw new InvalidStateStoreException("State store is not available anymore and may have been migrated to another instance; please re-discover its location from the state metadata."); } } }; final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType); return new DelegatingPeekingKeyValueIterator<>(storeName, new CompositeKeyValueIterator(stores.iterator(), nextIteratorFunction)); } CompositeReadOnlyKeyValueStore(final StateStoreProvider storeProvider,
final QueryableStoreType<ReadOnlyKeyValueStore<K, V>> storeType,
final String storeName); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); @Override long approximateNumEntries(); }### Answer:
@Test public void shouldSupportAllAcrossMultipleStores() throws Exception { final KeyValueStore<String, String> cache = newStoreInstance(); stubProviderTwo.addStore(storeName, cache); stubOneUnderlying.put("a", "a"); stubOneUnderlying.put("b", "b"); stubOneUnderlying.put("z", "z"); cache.put("c", "c"); cache.put("d", "d"); cache.put("x", "x"); final List<KeyValue<String, String>> results = toList(theStore.all()); assertTrue(results.contains(new KeyValue<>("a", "a"))); assertTrue(results.contains(new KeyValue<>("b", "b"))); assertTrue(results.contains(new KeyValue<>("c", "c"))); assertTrue(results.contains(new KeyValue<>("d", "d"))); assertTrue(results.contains(new KeyValue<>("x", "x"))); assertTrue(results.contains(new KeyValue<>("z", "z"))); assertEquals(6, results.size()); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowInvalidStoreExceptionOnAllDuringRebalance() throws Exception { rebalancing().all(); } |
### Question:
CompositeReadOnlyKeyValueStore implements ReadOnlyKeyValueStore<K, V> { @Override public long approximateNumEntries() { final List<ReadOnlyKeyValueStore<K, V>> stores = storeProvider.stores(storeName, storeType); long total = 0; for (ReadOnlyKeyValueStore<K, V> store : stores) { total += store.approximateNumEntries(); } return total < 0 ? Long.MAX_VALUE : total; } CompositeReadOnlyKeyValueStore(final StateStoreProvider storeProvider,
final QueryableStoreType<ReadOnlyKeyValueStore<K, V>> storeType,
final String storeName); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); @Override long approximateNumEntries(); }### Answer:
@Test public void shouldGetApproximateEntriesAcrossAllStores() throws Exception { final KeyValueStore<String, String> cache = newStoreInstance(); stubProviderTwo.addStore(storeName, cache); stubOneUnderlying.put("a", "a"); stubOneUnderlying.put("b", "b"); stubOneUnderlying.put("z", "z"); cache.put("c", "c"); cache.put("d", "d"); cache.put("x", "x"); assertEquals(6, theStore.approximateNumEntries()); }
@Test public void shouldReturnLongMaxValueOnOverflow() throws Exception { stubProviderTwo.addStore(storeName, new NoOpReadOnlyStore<Object, Object>() { @Override public long approximateNumEntries() { return Long.MAX_VALUE; } }); stubOneUnderlying.put("overflow", "me"); assertEquals(Long.MAX_VALUE, theStore.approximateNumEntries()); } |
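`shouldReturnLongMaxValueOnOverflow` works because summing `long` counts past `Long.MAX_VALUE` wraps negative, and `approximateNumEntries` maps any negative total back to `Long.MAX_VALUE`. The wrap, in two lines:

```java
public class OverflowClampSketch {
    public static void main(String[] args) {
        long total = Long.MAX_VALUE + 1L; // silently wraps to Long.MIN_VALUE
        System.out.println(total < 0 ? Long.MAX_VALUE : total); // the clamp the test asserts
    }
}
```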
### Question:
ChangeLoggingKeyValueStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<K, V> { @Override public void put(final K key, final V value) { final Bytes bytesKey = Bytes.wrap(serdes.rawKey(key)); final byte[] bytesValue = serdes.rawValue(value); innerBytes.put(bytesKey, bytesValue); } ChangeLoggingKeyValueStore(final KeyValueStore<Bytes, byte[]> bytesStore,
final Serde keySerde,
final Serde valueSerde); private ChangeLoggingKeyValueStore(final ChangeLoggingKeyValueBytesStore bytesStore,
final Serde keySerde,
final Serde valueSerde); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); @Override long approximateNumEntries(); @Override void put(final K key, final V value); @Override V putIfAbsent(final K key, final V value); @Override void putAll(final List<KeyValue<K, V>> entries); @Override V delete(final K key); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); }### Answer:
@Test public void shouldWriteKeyValueBytesToInnerStoreOnPut() throws Exception { store.put(hi, there); assertThat(deserializedValueFromInner(hi), equalTo(there)); } |
### Question:
ChangeLoggingKeyValueStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<K, V> { @Override public void putAll(final List<KeyValue<K, V>> entries) { final List<KeyValue<Bytes, byte[]>> keyValues = new ArrayList<>(); for (final KeyValue<K, V> entry : entries) { keyValues.add(KeyValue.pair(Bytes.wrap(serdes.rawKey(entry.key)), serdes.rawValue(entry.value))); } innerBytes.putAll(keyValues); } ChangeLoggingKeyValueStore(final KeyValueStore<Bytes, byte[]> bytesStore,
final Serde keySerde,
final Serde valueSerde); private ChangeLoggingKeyValueStore(final ChangeLoggingKeyValueBytesStore bytesStore,
final Serde keySerde,
final Serde valueSerde); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); @Override long approximateNumEntries(); @Override void put(final K key, final V value); @Override V putIfAbsent(final K key, final V value); @Override void putAll(final List<KeyValue<K, V>> entries); @Override V delete(final K key); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); }### Answer:
@Test public void shouldWriteAllKeyValueToInnerStoreOnPutAll() throws Exception { store.putAll(Arrays.asList(KeyValue.pair(hello, world), KeyValue.pair(hi, there))); assertThat(deserializedValueFromInner(hello), equalTo(world)); assertThat(deserializedValueFromInner(hi), equalTo(there)); } |
### Question:
ChangeLoggingKeyValueStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<K, V> { @Override public V delete(final K key) { final byte[] oldValue = innerBytes.delete(Bytes.wrap(serdes.rawKey(key))); if (oldValue == null) { return null; } return serdes.valueFrom(oldValue); } ChangeLoggingKeyValueStore(final KeyValueStore<Bytes, byte[]> bytesStore,
final Serde keySerde,
final Serde valueSerde); private ChangeLoggingKeyValueStore(final ChangeLoggingKeyValueBytesStore bytesStore,
final Serde keySerde,
final Serde valueSerde); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); @Override long approximateNumEntries(); @Override void put(final K key, final V value); @Override V putIfAbsent(final K key, final V value); @Override void putAll(final List<KeyValue<K, V>> entries); @Override V delete(final K key); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); }### Answer:
@Test public void shouldReturnNullOnDeleteIfNoOldValue() throws Exception { assertThat(store.delete(hi), is(nullValue())); } |
### Question:
ChangeLoggingKeyValueStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<K, V> { @Override public V putIfAbsent(final K key, final V value) { final V v = get(key); if (v == null) { put(key, value); } return v; } ChangeLoggingKeyValueStore(final KeyValueStore<Bytes, byte[]> bytesStore,
final Serde keySerde,
final Serde valueSerde); private ChangeLoggingKeyValueStore(final ChangeLoggingKeyValueBytesStore bytesStore,
final Serde keySerde,
final Serde valueSerde); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); @Override long approximateNumEntries(); @Override void put(final K key, final V value); @Override V putIfAbsent(final K key, final V value); @Override void putAll(final List<KeyValue<K, V>> entries); @Override V delete(final K key); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); }### Answer:
@Test public void shouldReturnNullOnPutIfAbsentWhenNoPreviousValue() throws Exception { assertThat(store.putIfAbsent(hi, there), is(nullValue())); } |
### Question:
TimestampConverter implements Transformation<R> { @Override public void configure(Map<String, ?> configs) { final SimpleConfig simpleConfig = new SimpleConfig(CONFIG_DEF, configs); final String field = simpleConfig.getString(FIELD_CONFIG); final String type = simpleConfig.getString(TARGET_TYPE_CONFIG); String formatPattern = simpleConfig.getString(FORMAT_CONFIG); schemaUpdateCache = new SynchronizedCache<>(new LRUCache<Schema, Schema>(16)); if (!VALID_TYPES.contains(type)) { throw new ConfigException("Unknown timestamp type in TimestampConverter: " + type + ". Valid values are " + Utils.join(VALID_TYPES, ", ") + "."); } if (type.equals(TYPE_STRING) && formatPattern.trim().isEmpty()) { throw new ConfigException("TimestampConverter requires format option to be specified when using string timestamps"); } SimpleDateFormat format = null; if (formatPattern != null && !formatPattern.trim().isEmpty()) { try { format = new SimpleDateFormat(formatPattern); format.setTimeZone(UTC); } catch (IllegalArgumentException e) { throw new ConfigException("TimestampConverter requires a SimpleDateFormat-compatible pattern for string timestamps: " + formatPattern, e); } } config = new Config(field, type, format); } @Override void configure(Map<String, ?> configs); @Override R apply(R record); @Override ConfigDef config(); @Override void close(); static final String OVERVIEW_DOC; static final String FIELD_CONFIG; static final String TARGET_TYPE_CONFIG; static final String FORMAT_CONFIG; static final ConfigDef CONFIG_DEF; }### Answer:
@Test(expected = ConfigException.class) public void testConfigNoTargetType() { TimestampConverter<SourceRecord> xform = new TimestampConverter.Value<>(); xform.configure(Collections.<String, String>emptyMap()); }
@Test(expected = ConfigException.class) public void testConfigInvalidTargetType() { TimestampConverter<SourceRecord> xform = new TimestampConverter.Value<>(); xform.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "invalid")); }
@Test(expected = ConfigException.class) public void testConfigMissingFormat() { TimestampConverter<SourceRecord> xform = new TimestampConverter.Value<>(); xform.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "string")); }
@Test(expected = ConfigException.class) public void testConfigInvalidFormat() { TimestampConverter<SourceRecord> xform = new TimestampConverter.Value<>(); Map<String, String> config = new HashMap<>(); config.put(TimestampConverter.TARGET_TYPE_CONFIG, "string"); config.put(TimestampConverter.FORMAT_CONFIG, "bad-format"); xform.configure(config); } |
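For contrast with the four failure cases, a configuration that `configure` accepts, written in the same style as the tests (it assumes the same test classpath; the pattern string and test name are illustrative, and `field` is assumed to default when omitted, as the empty-map failure case suggests only the target type is mandatory):

```java
@Test
public void testConfigValidStringTypeWithFormat() {
    TimestampConverter<SourceRecord> xform = new TimestampConverter.Value<>();
    Map<String, String> config = new HashMap<>();
    config.put(TimestampConverter.TARGET_TYPE_CONFIG, "string");
    config.put(TimestampConverter.FORMAT_CONFIG, "yyyy-MM-dd HH:mm:ss");
    xform.configure(config); // passes: known type, non-empty SimpleDateFormat pattern
}
```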
### Question:
ChangeLoggingKeyValueStore extends WrappedStateStore.AbstractStateStore implements KeyValueStore<K, V> { @Override public V get(final K key) { final byte[] rawValue = innerBytes.get(Bytes.wrap(serdes.rawKey(key))); if (rawValue == null) { return null; } return serdes.valueFrom(rawValue); } ChangeLoggingKeyValueStore(final KeyValueStore<Bytes, byte[]> bytesStore,
final Serde keySerde,
final Serde valueSerde); private ChangeLoggingKeyValueStore(final ChangeLoggingKeyValueBytesStore bytesStore,
final Serde keySerde,
final Serde valueSerde); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context, final StateStore root); @Override long approximateNumEntries(); @Override void put(final K key, final V value); @Override V putIfAbsent(final K key, final V value); @Override void putAll(final List<KeyValue<K, V>> entries); @Override V delete(final K key); @Override V get(final K key); @Override KeyValueIterator<K, V> range(final K from, final K to); @Override KeyValueIterator<K, V> all(); }### Answer:
@Test public void shouldReturnNullOnGetWhenDoesntExist() throws Exception { assertThat(store.get(hello), is(nullValue())); } |
### Question:
QueryableStoreProvider { public <T> T getStore(final String storeName, final QueryableStoreType<T> queryableStoreType) { final List<T> globalStore = globalStoreProvider.stores(storeName, queryableStoreType); if (!globalStore.isEmpty()) { return queryableStoreType.create(new WrappingStoreProvider(Collections.<StateStoreProvider>singletonList(globalStoreProvider)), storeName); } final List<T> allStores = new ArrayList<>(); for (StateStoreProvider storeProvider : storeProviders) { allStores.addAll(storeProvider.stores(storeName, queryableStoreType)); } if (allStores.isEmpty()) { throw new InvalidStateStoreException("the state store, " + storeName + ", may have migrated to another instance."); } return queryableStoreType.create( new WrappingStoreProvider(storeProviders), storeName); } QueryableStoreProvider(final List<StateStoreProvider> storeProviders,
final GlobalStateStoreProvider globalStateStoreProvider); T getStore(final String storeName, final QueryableStoreType<T> queryableStoreType); }### Answer:
@Test(expected = InvalidStateStoreException.class) public void shouldThrowExceptionIfKVStoreDoesntExist() throws Exception { storeProvider.getStore("not-a-store", QueryableStoreTypes.keyValueStore()); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowExceptionIfWindowStoreDoesntExist() throws Exception { storeProvider.getStore("not-a-store", QueryableStoreTypes.windowStore()); }
@Test public void shouldReturnKVStoreWhenItExists() throws Exception { assertNotNull(storeProvider.getStore(keyValueStore, QueryableStoreTypes.keyValueStore())); }
@Test public void shouldReturnWindowStoreWhenItExists() throws Exception { assertNotNull(storeProvider.getStore(windowStore, QueryableStoreTypes.windowStore())); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowExceptionWhenLookingForWindowStoreWithDifferentType() throws Exception { storeProvider.getStore(windowStore, QueryableStoreTypes.keyValueStore()); }
@Test(expected = InvalidStateStoreException.class) public void shouldThrowExceptionWhenLookingForKVStoreWithDifferentType() throws Exception { storeProvider.getStore(keyValueStore, QueryableStoreTypes.windowStore()); }
@Test public void shouldFindGlobalStores() throws Exception { globalStateStores.put("global", new NoOpReadOnlyStore<>()); assertNotNull(storeProvider.getStore("global", QueryableStoreTypes.keyValueStore())); } |
### Question:
StoreChangeLogger { void logChange(final K key, final V value) { if (collector != null) { final Serializer<K> keySerializer = serialization.keySerializer(); final Serializer<V> valueSerializer = serialization.valueSerializer(); collector.send(this.topic, key, value, this.partition, context.timestamp(), keySerializer, valueSerializer); } } StoreChangeLogger(String storeName, ProcessorContext context, StateSerdes<K, V> serialization); private StoreChangeLogger(String storeName, ProcessorContext context, int partition, StateSerdes<K, V> serialization); }### Answer:
@SuppressWarnings("unchecked") @Test public void testAddRemove() throws Exception { context.setTime(1); changeLogger.logChange(0, "zero"); changeLogger.logChange(1, "one"); changeLogger.logChange(2, "two"); assertEquals("zero", logged.get(0)); assertEquals("one", logged.get(1)); assertEquals("two", logged.get(2)); changeLogger.logChange(0, null); assertNull(logged.get(0)); } |
### Question:
KafkaStreams { public void close() { close(DEFAULT_CLOSE_TIMEOUT, TimeUnit.SECONDS); } KafkaStreams(final TopologyBuilder builder, final Properties props); KafkaStreams(final TopologyBuilder builder, final StreamsConfig config); KafkaStreams(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier); void setStateListener(final KafkaStreams.StateListener listener); synchronized State state(); Map<MetricName, ? extends Metric> metrics(); synchronized void start(); void close(); synchronized boolean close(final long timeout, final TimeUnit timeUnit); @Override String toString(); String toString(final String indent); void cleanUp(); void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler eh); Collection<StreamsMetadata> allMetadata(); Collection<StreamsMetadata> allMetadataForStore(final String storeName); StreamsMetadata metadataForKey(final String storeName,
final K key,
final Serializer<K> keySerializer); StreamsMetadata metadataForKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); T store(final String storeName, final QueryableStoreType<T> queryableStoreType); }### Answer:
@Test public void testCloseIsIdempotent() throws Exception { streams.close(); final int closeCount = MockMetricsReporter.CLOSE_COUNT.get(); streams.close(); Assert.assertEquals("subsequent close() calls should do nothing", closeCount, MockMetricsReporter.CLOSE_COUNT.get()); } |
### Question:
KafkaStreams { public Map<MetricName, ? extends Metric> metrics() { return Collections.unmodifiableMap(metrics.metrics()); } KafkaStreams(final TopologyBuilder builder, final Properties props); KafkaStreams(final TopologyBuilder builder, final StreamsConfig config); KafkaStreams(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier); void setStateListener(final KafkaStreams.StateListener listener); synchronized State state(); Map<MetricName, ? extends Metric> metrics(); synchronized void start(); void close(); synchronized boolean close(final long timeout, final TimeUnit timeUnit); @Override String toString(); String toString(final String indent); void cleanUp(); void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler eh); Collection<StreamsMetadata> allMetadata(); Collection<StreamsMetadata> allMetadataForStore(final String storeName); StreamsMetadata metadataForKey(final String storeName,
final K key,
final Serializer<K> keySerializer); StreamsMetadata metadataForKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); T store(final String storeName, final QueryableStoreType<T> queryableStoreType); }### Answer:
@Test public void testNumberDefaultMetrics() { final KafkaStreams streams = createKafkaStreams(); final Map<MetricName, ? extends Metric> metrics = streams.metrics(); assertEquals(16, metrics.size()); } |
### Question:
KafkaStreams { public Collection<StreamsMetadata> allMetadata() { validateIsRunning(); return streamsMetadataState.getAllMetadata(); } KafkaStreams(final TopologyBuilder builder, final Properties props); KafkaStreams(final TopologyBuilder builder, final StreamsConfig config); KafkaStreams(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier); void setStateListener(final KafkaStreams.StateListener listener); synchronized State state(); Map<MetricName, ? extends Metric> metrics(); synchronized void start(); void close(); synchronized boolean close(final long timeout, final TimeUnit timeUnit); @Override String toString(); String toString(final String indent); void cleanUp(); void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler eh); Collection<StreamsMetadata> allMetadata(); Collection<StreamsMetadata> allMetadataForStore(final String storeName); StreamsMetadata metadataForKey(final String storeName,
final K key,
final Serializer<K> keySerializer); StreamsMetadata metadataForKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); T store(final String storeName, final QueryableStoreType<T> queryableStoreType); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldNotGetAllTasksWhenNotRunning() throws Exception { streams.allMetadata(); } |
### Question:
KafkaStreams { public Collection<StreamsMetadata> allMetadataForStore(final String storeName) { validateIsRunning(); return streamsMetadataState.getAllMetadataForStore(storeName); } KafkaStreams(final TopologyBuilder builder, final Properties props); KafkaStreams(final TopologyBuilder builder, final StreamsConfig config); KafkaStreams(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier); void setStateListener(final KafkaStreams.StateListener listener); synchronized State state(); Map<MetricName, ? extends Metric> metrics(); synchronized void start(); void close(); synchronized boolean close(final long timeout, final TimeUnit timeUnit); @Override String toString(); String toString(final String indent); void cleanUp(); void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler eh); Collection<StreamsMetadata> allMetadata(); Collection<StreamsMetadata> allMetadataForStore(final String storeName); StreamsMetadata metadataForKey(final String storeName,
final K key,
final Serializer<K> keySerializer); StreamsMetadata metadataForKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); T store(final String storeName, final QueryableStoreType<T> queryableStoreType); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldNotGetAllTasksWithStoreWhenNotRunning() throws Exception { streams.allMetadataForStore("store"); } |
### Question:
KafkaStreams { public <K> StreamsMetadata metadataForKey(final String storeName, final K key, final Serializer<K> keySerializer) { validateIsRunning(); return streamsMetadataState.getMetadataWithKey(storeName, key, keySerializer); } KafkaStreams(final TopologyBuilder builder, final Properties props); KafkaStreams(final TopologyBuilder builder, final StreamsConfig config); KafkaStreams(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier); void setStateListener(final KafkaStreams.StateListener listener); synchronized State state(); Map<MetricName, ? extends Metric> metrics(); synchronized void start(); void close(); synchronized boolean close(final long timeout, final TimeUnit timeUnit); @Override String toString(); String toString(final String indent); void cleanUp(); void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler eh); Collection<StreamsMetadata> allMetadata(); Collection<StreamsMetadata> allMetadataForStore(final String storeName); StreamsMetadata metadataForKey(final String storeName,
final K key,
final Serializer<K> keySerializer); StreamsMetadata metadataForKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); T store(final String storeName, final QueryableStoreType<T> queryableStoreType); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldNotGetTaskWithKeyAndSerializerWhenNotRunning() throws Exception { streams.metadataForKey("store", "key", Serdes.String().serializer()); }
@Test(expected = IllegalStateException.class) public void shouldNotGetTaskWithKeyAndPartitionerWhenNotRunning() throws Exception { streams.metadataForKey("store", "key", new StreamPartitioner<String, Object>() { @Override public Integer partition(final String key, final Object value, final int numPartitions) { return 0; } }); } |
### Question:
KafkaStreams { public void cleanUp() { if (state.isRunning()) { throw new IllegalStateException("Cannot clean up while running."); } final String appId = config.getString(StreamsConfig.APPLICATION_ID_CONFIG); final String stateDir = config.getString(StreamsConfig.STATE_DIR_CONFIG); final String localApplicationDir = stateDir + File.separator + appId; log.debug("{} Removing local Kafka Streams application data in {} for application {}.", logPrefix, localApplicationDir, appId); final StateDirectory stateDirectory = new StateDirectory(appId, "cleanup", stateDir, Time.SYSTEM); stateDirectory.cleanRemovedTasks(0); } KafkaStreams(final TopologyBuilder builder, final Properties props); KafkaStreams(final TopologyBuilder builder, final StreamsConfig config); KafkaStreams(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier); void setStateListener(final KafkaStreams.StateListener listener); synchronized State state(); Map<MetricName, ? extends Metric> metrics(); synchronized void start(); void close(); synchronized boolean close(final long timeout, final TimeUnit timeUnit); @Override String toString(); String toString(final String indent); void cleanUp(); void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler eh); Collection<StreamsMetadata> allMetadata(); Collection<StreamsMetadata> allMetadataForStore(final String storeName); StreamsMetadata metadataForKey(final String storeName,
final K key,
final Serializer<K> keySerializer); StreamsMetadata metadataForKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); T store(final String storeName, final QueryableStoreType<T> queryableStoreType); }### Answer:
@Test public void testCleanup() throws Exception { final Properties props = new Properties(); props.setProperty(StreamsConfig.APPLICATION_ID_CONFIG, "testLocalCleanup"); props.setProperty(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, CLUSTER.bootstrapServers()); final KStreamBuilder builder = new KStreamBuilder(); final KafkaStreams streams = new KafkaStreams(builder, props); streams.cleanUp(); streams.start(); streams.close(); streams.cleanUp(); } |
### Question:
KafkaStreams { @Override public String toString() { return toString(""); } KafkaStreams(final TopologyBuilder builder, final Properties props); KafkaStreams(final TopologyBuilder builder, final StreamsConfig config); KafkaStreams(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier); void setStateListener(final KafkaStreams.StateListener listener); synchronized State state(); Map<MetricName, ? extends Metric> metrics(); synchronized void start(); void close(); synchronized boolean close(final long timeout, final TimeUnit timeUnit); @Override String toString(); String toString(final String indent); void cleanUp(); void setUncaughtExceptionHandler(final Thread.UncaughtExceptionHandler eh); Collection<StreamsMetadata> allMetadata(); Collection<StreamsMetadata> allMetadataForStore(final String storeName); StreamsMetadata metadataForKey(final String storeName,
final K key,
final Serializer<K> keySerializer); StreamsMetadata metadataForKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); T store(final String storeName, final QueryableStoreType<T> queryableStoreType); }### Answer:
@Test public void testToString() { streams.start(); String streamString = streams.toString(); streams.close(); String appId = streamString.split("\\n")[1].split(":")[1].trim(); Assert.assertNotEquals("streamString should not be empty", "", streamString); Assert.assertNotNull("streamString should not be null", streamString); Assert.assertNotEquals("streamString contains non-empty appId", "", appId); Assert.assertNotNull("streamString contains non-null appId", appId); } |
### Question:
Windows { protected Windows<W> segments(final int segments) throws IllegalArgumentException { if (segments < 2) { throw new IllegalArgumentException("Number of segments must be at least 2."); } this.segments = segments; return this; } protected Windows(); Windows<W> until(final long durationMs); long maintainMs(); abstract Map<Long, W> windowsFor(final long timestamp); abstract long size(); public int segments; }### Answer:
@Test public void shouldSetNumberOfSegments() { final int anySegmentSizeLargerThanOne = 5; assertEquals(anySegmentSizeLargerThanOne, new TestWindows().segments(anySegmentSizeLargerThanOne).segments); }
@Test(expected = IllegalArgumentException.class) public void numberOfSegmentsMustBeAtLeastTwo() { new TestWindows().segments(1); } |
### Question:
Windows { public Windows<W> until(final long durationMs) throws IllegalArgumentException { if (durationMs < 0) { throw new IllegalArgumentException("Window retention time (durationMs) cannot be negative."); } maintainDurationMs = durationMs; return this; } protected Windows(); Windows<W> until(final long durationMs); long maintainMs(); abstract Map<Long, W> windowsFor(final long timestamp); abstract long size(); public int segments; }### Answer:
@Test(expected = IllegalArgumentException.class) public void retentionTimeMustNotBeNegative() { new TestWindows().until(-1); } |
### Question:
TimeWindows extends Windows<TimeWindow> { public static TimeWindows of(final long sizeMs) throws IllegalArgumentException { if (sizeMs <= 0) { throw new IllegalArgumentException("Window size (sizeMs) must be larger than zero."); } return new TimeWindows(sizeMs, sizeMs); } private TimeWindows(final long sizeMs, final long advanceMs); static TimeWindows of(final long sizeMs); TimeWindows advanceBy(final long advanceMs); @Override Map<Long, TimeWindow> windowsFor(final long timestamp); @Override long size(); @Override TimeWindows until(final long durationMs); @Override long maintainMs(); @Override boolean equals(final Object o); @Override int hashCode(); final long sizeMs; final long advanceMs; }### Answer:
@Test public void shouldSetWindowSize() { assertEquals(ANY_SIZE, TimeWindows.of(ANY_SIZE).sizeMs); }
@Test(expected = IllegalArgumentException.class) public void windowSizeMustNotBeZero() { TimeWindows.of(0); }
@Test(expected = IllegalArgumentException.class) public void windowSizeMustNotBeNegative() { TimeWindows.of(-1); } |
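A minimal sketch combining the fluent calls these tests pin down, `of()`, `advanceBy()`, and `until()`; the concrete durations are illustrative only:

```java
import java.util.concurrent.TimeUnit;
import org.apache.kafka.streams.kstream.TimeWindows;

public class TimeWindowsExample {
    public static void main(final String[] args) {
        // Hopping windows: 5-minute windows advancing every minute,
        // retained for one hour (retention must be at least the window size).
        final TimeWindows windows = TimeWindows
                .of(TimeUnit.MINUTES.toMillis(5))
                .advanceBy(TimeUnit.MINUTES.toMillis(1))
                .until(TimeUnit.HOURS.toMillis(1));
        System.out.println("size=" + windows.sizeMs + "ms, advance=" + windows.advanceMs + "ms");
    }
}
```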
### Question:
SetSchemaMetadata implements Transformation<R> { protected static Object updateSchemaIn(Object keyOrValue, Schema updatedSchema) { if (keyOrValue instanceof Struct) { Struct origStruct = (Struct) keyOrValue; Struct newStruct = new Struct(updatedSchema); for (Field field : updatedSchema.fields()) { newStruct.put(field, origStruct.get(field)); } return newStruct; } return keyOrValue; } @Override void configure(Map<String, ?> configs); @Override R apply(R record); @Override ConfigDef config(); @Override void close(); static final String OVERVIEW_DOC; static final ConfigDef CONFIG_DEF; }### Answer:
@Test public void updateSchemaOfStruct() { final String fieldName1 = "f1"; final String fieldName2 = "f2"; final String fieldValue1 = "value1"; final int fieldValue2 = 1; final Schema schema = SchemaBuilder.struct() .name("my.orig.SchemaDefn") .field(fieldName1, Schema.STRING_SCHEMA) .field(fieldName2, Schema.INT32_SCHEMA) .build(); final Struct value = new Struct(schema).put(fieldName1, fieldValue1).put(fieldName2, fieldValue2); final Schema newSchema = SchemaBuilder.struct() .name("my.updated.SchemaDefn") .field(fieldName1, Schema.STRING_SCHEMA) .field(fieldName2, Schema.INT32_SCHEMA) .build(); Struct newValue = (Struct) SetSchemaMetadata.updateSchemaIn(value, newSchema); assertMatchingSchema(newValue, newSchema); }
@Test public void updateSchemaOfNonStruct() { Object value = Integer.valueOf(1); Object updatedValue = SetSchemaMetadata.updateSchemaIn(value, Schema.INT32_SCHEMA); assertSame(value, updatedValue); }
@Test public void updateSchemaOfNull() { Object updatedValue = SetSchemaMetadata.updateSchemaIn(null, Schema.INT32_SCHEMA); assertNull(updatedValue); } |
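To see `updateSchemaIn()` in context, a sketch of the transformation applied end to end; the config keys "schema.name" and "schema.version" follow this SMT's ConfigDef, while the topic and field names are made up:

```java
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.SetSchemaMetadata;

public class SetSchemaMetadataExample {
    public static void main(final String[] args) {
        final SetSchemaMetadata<SourceRecord> xform = new SetSchemaMetadata.Value<>();
        final Map<String, String> config = new HashMap<>();
        config.put("schema.name", "my.updated.SchemaDefn");  // assumed target name
        config.put("schema.version", "2");
        xform.configure(config);

        final Schema schema = SchemaBuilder.struct().name("my.orig.SchemaDefn")
                .field("f1", Schema.STRING_SCHEMA).build();
        final Struct value = new Struct(schema).put("f1", "value1");
        final SourceRecord record = new SourceRecord(null, null, "topic", schema, value);

        // The value keeps its fields but is re-homed onto the renamed schema.
        final SourceRecord updated = xform.apply(record);
        System.out.println(updated.valueSchema().name());  // my.updated.SchemaDefn
        xform.close();
    }
}
```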
### Question:
WindowedStreamPartitioner implements StreamPartitioner<Windowed<K>, V> { public Integer partition(final Windowed<K> windowedKey, final V value, final int numPartitions) { final byte[] keyBytes = serializer.serializeBaseKey(topic, windowedKey); return toPositive(Utils.murmur2(keyBytes)) % numPartitions; } WindowedStreamPartitioner(final String topic, final WindowedSerializer<K> serializer); Integer partition(final Windowed<K> windowedKey, final V value, final int numPartitions); }### Answer:
@Test public void testCopartitioning() { Random rand = new Random(); DefaultPartitioner defaultPartitioner = new DefaultPartitioner(); WindowedSerializer<Integer> windowedSerializer = new WindowedSerializer<>(intSerializer); WindowedStreamPartitioner<Integer, String> streamPartitioner = new WindowedStreamPartitioner<>(topicName, windowedSerializer); for (int k = 0; k < 10; k++) { Integer key = rand.nextInt(); byte[] keyBytes = intSerializer.serialize(topicName, key); String value = key.toString(); byte[] valueBytes = stringSerializer.serialize(topicName, value); Integer expected = defaultPartitioner.partition("topic", key, keyBytes, value, valueBytes, cluster); for (int w = 1; w < 10; w++) { TimeWindow window = new TimeWindow(10 * w, 20 * w); Windowed<Integer> windowedKey = new Windowed<>(key, window); Integer actual = streamPartitioner.partition(windowedKey, value, infos.size()); assertEquals(expected, actual); } } } |
### Question:
KTableAggregate implements KTableProcessorSupplier<K, V, T> { @Override public Processor<K, Change<V>> get() { return new KTableAggregateProcessor(); } KTableAggregate(String storeName, Initializer<T> initializer, Aggregator<? super K, ? super V, T> add, Aggregator<? super K, ? super V, T> remove); @Override void enableSendingOldValues(); @Override Processor<K, Change<V>> get(); @Override KTableValueGetterSupplier<K, T> view(); }### Answer:
@Test public void shouldForwardToCorrectProcessorNodeWhenMultiCacheEvictions() throws Exception { final String tableOne = "tableOne"; final String tableTwo = "tableTwo"; final KStreamBuilder builder = new KStreamBuilder(); final String reduceTopic = "TestDriver-reducer-store-repartition"; final Map<String, Long> reduceResults = new HashMap<>(); final KTable<String, String> one = builder.table(Serdes.String(), Serdes.String(), tableOne, tableOne); final KTable<Long, String> two = builder.table(Serdes.Long(), Serdes.String(), tableTwo, tableTwo); final KTable<String, Long> reduce = two.groupBy(new KeyValueMapper<Long, String, KeyValue<String, Long>>() { @Override public KeyValue<String, Long> apply(final Long key, final String value) { return new KeyValue<>(value, key); } }, Serdes.String(), Serdes.Long()) .reduce(new Reducer<Long>() { @Override public Long apply(final Long value1, final Long value2) { return value1 + value2; } }, new Reducer<Long>() { @Override public Long apply(final Long value1, final Long value2) { return value1 - value2; } }, "reducer-store"); reduce.foreach(new ForeachAction<String, Long>() { @Override public void apply(final String key, final Long value) { reduceResults.put(key, value); } }); one.leftJoin(reduce, new ValueJoiner<String, Long, String>() { @Override public String apply(final String value1, final Long value2) { return value1 + ":" + value2; } }) .mapValues(new ValueMapper<String, String>() { @Override public String apply(final String value) { return value; } }); driver = new KStreamTestDriver(builder, stateDir, 111); driver.process(reduceTopic, "1", new Change<>(1L, null)); driver.process("tableOne", "2", "2"); driver.process(reduceTopic, "2", new Change<>(2L, null)); driver.process(reduceTopic, "2", new Change<>(2L, null)); assertEquals(Long.valueOf(2L), reduceResults.get("2")); driver.process("tableOne", "1", "5"); assertEquals(Long.valueOf(4L), reduceResults.get("2")); } |
### Question:
KStreamFlatMap implements ProcessorSupplier<K, V> { @Override public Processor<K, V> get() { return new KStreamFlatMapProcessor(); } KStreamFlatMap(KeyValueMapper<? super K, ? super V, ? extends Iterable<? extends KeyValue<? extends K1, ? extends V1>>> mapper); @Override Processor<K, V> get(); }### Answer:
@Test public void testFlatMap() { KStreamBuilder builder = new KStreamBuilder(); KeyValueMapper<Number, Object, Iterable<KeyValue<String, String>>> mapper = new KeyValueMapper<Number, Object, Iterable<KeyValue<String, String>>>() { @Override public Iterable<KeyValue<String, String>> apply(Number key, Object value) { ArrayList<KeyValue<String, String>> result = new ArrayList<>(); for (int i = 0; i < key.intValue(); i++) { result.add(KeyValue.pair(Integer.toString(key.intValue() * 10 + i), value.toString())); } return result; } }; final int[] expectedKeys = {0, 1, 2, 3}; KStream<Integer, String> stream; MockProcessorSupplier<String, String> processor; processor = new MockProcessorSupplier<>(); stream = builder.stream(Serdes.Integer(), Serdes.String(), topicName); stream.flatMap(mapper).process(processor); driver = new KStreamTestDriver(builder); for (int expectedKey : expectedKeys) { driver.process(topicName, expectedKey, "V" + expectedKey); } assertEquals(6, processor.processed.size()); String[] expected = {"10:V1", "20:V2", "21:V2", "30:V3", "31:V3", "32:V3"}; for (int i = 0; i < expected.length; i++) { assertEquals(expected[i], processor.processed.get(i)); } } |
### Question:
KStreamFlatMapValues implements ProcessorSupplier<K, V> { @Override public Processor<K, V> get() { return new KStreamFlatMapValuesProcessor(); } KStreamFlatMapValues(ValueMapper<? super V, ? extends Iterable<? extends V1>> mapper); @Override Processor<K, V> get(); }### Answer:
@Test public void testFlatMapValues() { KStreamBuilder builder = new KStreamBuilder(); ValueMapper<Number, Iterable<String>> mapper = new ValueMapper<Number, Iterable<String>>() { @Override public Iterable<String> apply(Number value) { ArrayList<String> result = new ArrayList<String>(); result.add("v" + value); result.add("V" + value); return result; } }; final int[] expectedKeys = {0, 1, 2, 3}; KStream<Integer, Integer> stream; MockProcessorSupplier<Integer, String> processor; processor = new MockProcessorSupplier<>(); stream = builder.stream(Serdes.Integer(), Serdes.Integer(), topicName); stream.flatMapValues(mapper).process(processor); driver = new KStreamTestDriver(builder); for (int expectedKey : expectedKeys) { driver.process(topicName, expectedKey, expectedKey); } assertEquals(8, processor.processed.size()); String[] expected = {"0:v0", "0:V0", "1:v1", "1:V1", "2:v2", "2:V2", "3:v3", "3:V3"}; for (int i = 0; i < expected.length; i++) { assertEquals(expected[i], processor.processed.get(i)); } } |
### Question:
Cast implements Transformation<R> { @Override public void configure(Map<String, ?> props) { final SimpleConfig config = new SimpleConfig(CONFIG_DEF, props); casts = parseFieldTypes(config.getList(SPEC_CONFIG)); wholeValueCastType = casts.get(WHOLE_VALUE_CAST); schemaUpdateCache = new SynchronizedCache<>(new LRUCache<Schema, Schema>(16)); } @Override void configure(Map<String, ?> props); @Override R apply(R record); @Override ConfigDef config(); @Override void close(); static final String OVERVIEW_DOC; static final String SPEC_CONFIG; static final ConfigDef CONFIG_DEF; }### Answer:
@Test(expected = ConfigException.class) public void testConfigEmpty() { final Cast<SourceRecord> xform = new Cast.Key<>(); xform.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "")); }
@Test(expected = ConfigException.class) public void testConfigInvalidSchemaType() { final Cast<SourceRecord> xform = new Cast.Key<>(); xform.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:faketype")); }
@Test(expected = ConfigException.class) public void testConfigInvalidTargetType() { final Cast<SourceRecord> xform = new Cast.Key<>(); xform.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:array")); }
@Test(expected = ConfigException.class) public void testConfigInvalidMap() { final Cast<SourceRecord> xform = new Cast.Key<>(); xform.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int8:extra")); }
@Test(expected = ConfigException.class) public void testConfigMixWholeAndFieldTransformation() { final Cast<SourceRecord> xform = new Cast.Key<>(); xform.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int8,int32")); } |
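For contrast with the invalid specs rejected above, a sketch of a valid whole-value cast on a schemaless record; the topic name and value are placeholders:

```java
import java.util.Collections;
import org.apache.kafka.connect.source.SourceRecord;
import org.apache.kafka.connect.transforms.Cast;

public class CastExample {
    public static void main(final String[] args) {
        final Cast<SourceRecord> xform = new Cast.Value<>();
        // A bare type (no "field:" prefix) casts the whole value.
        xform.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8"));
        final SourceRecord record = new SourceRecord(null, null, "topic", null, 42);
        final SourceRecord cast = xform.apply(record);
        System.out.println(cast.value().getClass());  // class java.lang.Byte
        xform.close();
    }
}
```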
### Question:
KStreamBranch implements ProcessorSupplier<K, V> { @SuppressWarnings("unchecked") public KStreamBranch(Predicate<K, V> ... predicates) { this.predicates = predicates; } @SuppressWarnings("unchecked") KStreamBranch(Predicate<K, V> ... predicates); @Override Processor<K, V> get(); }### Answer:
@SuppressWarnings("unchecked") @Test public void testKStreamBranch() { KStreamBuilder builder = new KStreamBuilder(); builder.setApplicationId("X"); Predicate<Integer, String> isEven = new Predicate<Integer, String>() { @Override public boolean test(Integer key, String value) { return (key % 2) == 0; } }; Predicate<Integer, String> isMultipleOfThree = new Predicate<Integer, String>() { @Override public boolean test(Integer key, String value) { return (key % 3) == 0; } }; Predicate<Integer, String> isOdd = new Predicate<Integer, String>() { @Override public boolean test(Integer key, String value) { return (key % 2) != 0; } }; final int[] expectedKeys = new int[]{1, 2, 3, 4, 5, 6}; KStream<Integer, String> stream; KStream<Integer, String>[] branches; MockProcessorSupplier<Integer, String>[] processors; stream = builder.stream(Serdes.Integer(), Serdes.String(), topicName); branches = stream.branch(isEven, isMultipleOfThree, isOdd); assertEquals(3, branches.length); processors = (MockProcessorSupplier<Integer, String>[]) Array.newInstance(MockProcessorSupplier.class, branches.length); for (int i = 0; i < branches.length; i++) { processors[i] = new MockProcessorSupplier<>(); branches[i].process(processors[i]); } driver = new KStreamTestDriver(builder); for (int expectedKey : expectedKeys) { driver.process(topicName, expectedKey, "V" + expectedKey); } assertEquals(3, processors[0].processed.size()); assertEquals(1, processors[1].processed.size()); assertEquals(2, processors[2].processed.size()); } |
### Question:
KGroupedTableImpl extends AbstractStream<K> implements KGroupedTable<K, V> { @Override public KTable<K, Long> count(final String queryableStoreName) { determineIsQueryable(queryableStoreName); return count(keyValueStore(keySerde, Serdes.Long(), getOrCreateName(queryableStoreName, AGGREGATE_NAME))); } KGroupedTableImpl(final KStreamBuilder topology,
final String name,
final String sourceName,
final Serde<? extends K> keySerde,
final Serde<? extends V> valSerde); @Override KTable<K, T> aggregate(final Initializer<T> initializer,
final Aggregator<? super K, ? super V, T> adder,
final Aggregator<? super K, ? super V, T> subtractor,
final Serde<T> aggValueSerde,
final String queryableStoreName); @Override KTable<K, T> aggregate(final Initializer<T> initializer,
final Aggregator<? super K, ? super V, T> adder,
final Aggregator<? super K, ? super V, T> subtractor,
final Serde<T> aggValueSerde); @Override KTable<K, T> aggregate(final Initializer<T> initializer,
final Aggregator<? super K, ? super V, T> adder,
final Aggregator<? super K, ? super V, T> subtractor,
final String queryableStoreName); @Override KTable<K, T> aggregate(final Initializer<T> initializer,
final Aggregator<? super K, ? super V, T> adder,
final Aggregator<? super K, ? super V, T> subtractor); @Override KTable<K, T> aggregate(final Initializer<T> initializer,
final Aggregator<? super K, ? super V, T> adder,
final Aggregator<? super K, ? super V, T> subtractor,
final StateStoreSupplier<KeyValueStore> storeSupplier); @Override KTable<K, V> reduce(final Reducer<V> adder,
final Reducer<V> subtractor,
final String queryableStoreName); @Override KTable<K, V> reduce(final Reducer<V> adder,
final Reducer<V> subtractor); @Override KTable<K, V> reduce(final Reducer<V> adder,
final Reducer<V> subtractor,
final StateStoreSupplier<KeyValueStore> storeSupplier); @Override KTable<K, Long> count(final String queryableStoreName); @Override KTable<K, Long> count(); @Override KTable<K, Long> count(final StateStoreSupplier<KeyValueStore> storeSupplier); }### Answer:
@Test public void shouldAllowNullStoreNameOnCount() { groupedTable.count((String) null); } |
### Question:
SessionKeySerde implements Serde<Windowed<K>> { @Override public Serializer<Windowed<K>> serializer() { return new SessionKeySerializer(keySerde.serializer()); } SessionKeySerde(final Serde<K> keySerde); @Override void configure(final Map<String, ?> configs, final boolean isKey); @Override void close(); @Override Serializer<Windowed<K>> serializer(); @Override Deserializer<Windowed<K>> deserializer(); static long extractEnd(final byte[] binaryKey); static long extractStart(final byte[] binaryKey); static byte[] extractKeyBytes(final byte[] binaryKey); static Windowed<K> from(final byte[] binaryKey, final Deserializer<K> keyDeserializer, final String topic); static Windowed<Bytes> fromBytes(Bytes bytesKey); static Bytes toBinary(final Windowed<K> sessionKey, final Serializer<K> serializer, final String topic); static Bytes bytesToBinary(final Windowed<Bytes> sessionKey); }### Answer:
@Test public void shouldSerializeNullToNull() throws Exception { assertNull(sessionKeySerde.serializer().serialize(topic, null)); } |
### Question:
SessionKeySerde implements Serde<Windowed<K>> { @Override public Deserializer<Windowed<K>> deserializer() { return new SessionKeyDeserializer(keySerde.deserializer()); } SessionKeySerde(final Serde<K> keySerde); @Override void configure(final Map<String, ?> configs, final boolean isKey); @Override void close(); @Override Serializer<Windowed<K>> serializer(); @Override Deserializer<Windowed<K>> deserializer(); static long extractEnd(final byte[] binaryKey); static long extractStart(final byte[] binaryKey); static byte[] extractKeyBytes(final byte[] binaryKey); static Windowed<K> from(final byte[] binaryKey, final Deserializer<K> keyDeserializer, final String topic); static Windowed<Bytes> fromBytes(Bytes bytesKey); static Bytes toBinary(final Windowed<K> sessionKey, final Serializer<K> serializer, final String topic); static Bytes bytesToBinary(final Windowed<Bytes> sessionKey); }### Answer:
@Test public void shouldDeSerializeEmptyByteArrayToNull() throws Exception { assertNull(sessionKeySerde.deserializer().deserialize(topic, new byte[0])); }
@Test public void shouldDeSerializeNullToNull() throws Exception { assertNull(sessionKeySerde.deserializer().deserialize(topic, null)); } |
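Beyond the null and empty edge cases above, a round-trip sketch of the serde on a real session key; the key, topic, and window bounds are arbitrary:

```java
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.kstream.Windowed;
import org.apache.kafka.streams.kstream.internals.SessionKeySerde;
import org.apache.kafka.streams.kstream.internals.SessionWindow;

public class SessionKeySerdeExample {
    public static void main(final String[] args) {
        final SessionKeySerde<String> serde = new SessionKeySerde<>(Serdes.String());
        final Windowed<String> key = new Windowed<>("user-1", new SessionWindow(0L, 100L));
        // Serialize the windowed key, then recover it from the binary form.
        final byte[] bytes = serde.serializer().serialize("topic", key);
        final Windowed<String> roundTripped = serde.deserializer().deserialize("topic", bytes);
        System.out.println(roundTripped.key() + " @ [" + roundTripped.window().start()
                + ", " + roundTripped.window().end() + "]");  // user-1 @ [0, 100]
    }
}
```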
### Question:
KStreamMap implements ProcessorSupplier<K, V> { @Override public Processor<K, V> get() { return new KStreamMapProcessor(); } KStreamMap(KeyValueMapper<? super K, ? super V, ? extends KeyValue<? extends K1, ? extends V1>> mapper); @Override Processor<K, V> get(); }### Answer:
@Test public void testMap() { KStreamBuilder builder = new KStreamBuilder(); KeyValueMapper<Integer, String, KeyValue<String, Integer>> mapper = new KeyValueMapper<Integer, String, KeyValue<String, Integer>>() { @Override public KeyValue<String, Integer> apply(Integer key, String value) { return KeyValue.pair(value, key); } }; final int[] expectedKeys = new int[]{0, 1, 2, 3}; KStream<Integer, String> stream = builder.stream(intSerde, stringSerde, topicName); MockProcessorSupplier<String, Integer> processor; processor = new MockProcessorSupplier<>(); stream.map(mapper).process(processor); driver = new KStreamTestDriver(builder); for (int expectedKey : expectedKeys) { driver.process(topicName, expectedKey, "V" + expectedKey); } assertEquals(4, processor.processed.size()); String[] expected = new String[]{"V0:0", "V1:1", "V2:2", "V3:3"}; for (int i = 0; i < expected.length; i++) { assertEquals(expected[i], processor.processed.get(i)); } } |
### Question:
KStreamTransform implements ProcessorSupplier<K, V> { @Override public Processor<K, V> get() { return new KStreamTransformProcessor<>(transformerSupplier.get()); } KStreamTransform(TransformerSupplier<? super K, ? super V, ? extends KeyValue<? extends K1, ? extends V1>> transformerSupplier); @Override Processor<K, V> get(); }### Answer:
@Test public void testTransform() { KStreamBuilder builder = new KStreamBuilder(); TransformerSupplier<Number, Number, KeyValue<Integer, Integer>> transformerSupplier = new TransformerSupplier<Number, Number, KeyValue<Integer, Integer>>() { public Transformer<Number, Number, KeyValue<Integer, Integer>> get() { return new Transformer<Number, Number, KeyValue<Integer, Integer>>() { private int total = 0; @Override public void init(ProcessorContext context) { } @Override public KeyValue<Integer, Integer> transform(Number key, Number value) { total += value.intValue(); return KeyValue.pair(key.intValue() * 2, total); } @Override public KeyValue<Integer, Integer> punctuate(long timestamp) { return KeyValue.pair(-1, (int) timestamp); } @Override public void close() { } }; } }; final int[] expectedKeys = {1, 10, 100, 1000}; MockProcessorSupplier<Integer, Integer> processor = new MockProcessorSupplier<>(); KStream<Integer, Integer> stream = builder.stream(intSerde, intSerde, topicName); stream.transform(transformerSupplier).process(processor); driver = new KStreamTestDriver(builder); for (int expectedKey : expectedKeys) { driver.process(topicName, expectedKey, expectedKey * 10); } driver.punctuate(2); driver.punctuate(3); assertEquals(6, processor.processed.size()); String[] expected = {"2:10", "20:110", "200:1110", "2000:11110", "-1:2", "-1:3"}; for (int i = 0; i < expected.length; i++) { assertEquals(expected[i], processor.processed.get(i)); } } |
### Question:
KTableSource implements ProcessorSupplier<K, V> { @Override public Processor<K, V> get() { return new KTableSourceProcessor(); } KTableSource(String storeName); @Override Processor<K, V> get(); void enableSendingOldValues(); final String storeName; }### Answer:
@Test public void testValueGetter() throws IOException { final KStreamBuilder builder = new KStreamBuilder(); String topic1 = "topic1"; KTableImpl<String, String, String> table1 = (KTableImpl<String, String, String>) builder.table(stringSerde, stringSerde, topic1, "anyStoreName"); KTableValueGetterSupplier<String, String> getterSupplier1 = table1.valueGetterSupplier(); driver = new KStreamTestDriver(builder, stateDir, null, null); KTableValueGetter<String, String> getter1 = getterSupplier1.get(); getter1.init(driver.context()); driver.process(topic1, "A", "01"); driver.process(topic1, "B", "01"); driver.process(topic1, "C", "01"); assertEquals("01", getter1.get("A")); assertEquals("01", getter1.get("B")); assertEquals("01", getter1.get("C")); driver.process(topic1, "A", "02"); driver.process(topic1, "B", "02"); assertEquals("02", getter1.get("A")); assertEquals("02", getter1.get("B")); assertEquals("01", getter1.get("C")); driver.process(topic1, "A", "03"); assertEquals("03", getter1.get("A")); assertEquals("02", getter1.get("B")); assertEquals("01", getter1.get("C")); driver.process(topic1, "A", null); driver.process(topic1, "B", null); assertNull(getter1.get("A")); assertNull(getter1.get("B")); assertEquals("01", getter1.get("C")); } |
### Question:
KTableSource implements ProcessorSupplier<K, V> { public void enableSendingOldValues() { sendOldValues = true; } KTableSource(String storeName); @Override Processor<K, V> get(); void enableSendingOldValues(); final String storeName; }### Answer:
@Test public void testSendingOldValue() throws IOException { final KStreamBuilder builder = new KStreamBuilder(); String topic1 = "topic1"; KTableImpl<String, String, String> table1 = (KTableImpl<String, String, String>) builder.table(stringSerde, stringSerde, topic1, "anyStoreName"); table1.enableSendingOldValues(); assertTrue(table1.sendingOldValueEnabled()); MockProcessorSupplier<String, Integer> proc1 = new MockProcessorSupplier<>(); builder.addProcessor("proc1", proc1, table1.name); driver = new KStreamTestDriver(builder, stateDir, null, null); driver.process(topic1, "A", "01"); driver.process(topic1, "B", "01"); driver.process(topic1, "C", "01"); driver.flushState(); proc1.checkAndClearProcessResult("A:(01<-null)", "B:(01<-null)", "C:(01<-null)"); driver.process(topic1, "A", "02"); driver.process(topic1, "B", "02"); driver.flushState(); proc1.checkAndClearProcessResult("A:(02<-01)", "B:(02<-01)"); driver.process(topic1, "A", "03"); driver.flushState(); proc1.checkAndClearProcessResult("A:(03<-02)"); driver.process(topic1, "A", null); driver.process(topic1, "B", null); driver.flushState(); proc1.checkAndClearProcessResult("A:(null<-03)", "B:(null<-02)"); } |
### Question:
KTableMapValues implements KTableProcessorSupplier<K, V, V1> { @Override public void enableSendingOldValues() { parent.enableSendingOldValues(); sendOldValues = true; } KTableMapValues(final KTableImpl<K, ?, V> parent, final ValueMapper<? super V, ? extends V1> mapper,
final String queryableName); @Override Processor<K, Change<V>> get(); @Override KTableValueGetterSupplier<K, V1> view(); @Override void enableSendingOldValues(); }### Answer:
@Test public void testSendingOldValue() throws IOException { KStreamBuilder builder = new KStreamBuilder(); String topic1 = "topic1"; KTableImpl<String, String, String> table1 = (KTableImpl<String, String, String>) builder.table(stringSerde, stringSerde, topic1, "anyStoreName"); KTableImpl<String, String, Integer> table2 = (KTableImpl<String, String, Integer>) table1.mapValues( new ValueMapper<String, Integer>() { @Override public Integer apply(String value) { return Integer.valueOf(value); } }); table2.enableSendingOldValues(); MockProcessorSupplier<String, Integer> proc = new MockProcessorSupplier<>(); builder.addProcessor("proc", proc, table2.name); driver = new KStreamTestDriver(builder, stateDir, null, null); assertTrue(table1.sendingOldValueEnabled()); assertTrue(table2.sendingOldValueEnabled()); driver.process(topic1, "A", "01"); driver.process(topic1, "B", "01"); driver.process(topic1, "C", "01"); driver.flushState(); proc.checkAndClearProcessResult("A:(1<-null)", "B:(1<-null)", "C:(1<-null)"); driver.process(topic1, "A", "02"); driver.process(topic1, "B", "02"); driver.flushState(); proc.checkAndClearProcessResult("A:(2<-1)", "B:(2<-1)"); driver.process(topic1, "A", "03"); driver.flushState(); proc.checkAndClearProcessResult("A:(3<-2)"); driver.process(topic1, "A", null); driver.flushState(); proc.checkAndClearProcessResult("A:(null<-3)"); } |
### Question:
ByteArrayConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { if (schema != null && schema.type() != Schema.Type.BYTES) throw new DataException("Invalid schema type for ByteArrayConverter: " + schema.type().toString()); if (value != null && !(value instanceof byte[])) throw new DataException("ByteArrayConverter is not compatible with objects of type " + value.getClass()); return (byte[]) value; } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }### Answer:
@Test public void testFromConnect() { assertArrayEquals( SAMPLE_BYTES, converter.fromConnectData(TOPIC, Schema.BYTES_SCHEMA, SAMPLE_BYTES) ); }
@Test public void testFromConnectSchemaless() { assertArrayEquals( SAMPLE_BYTES, converter.fromConnectData(TOPIC, null, SAMPLE_BYTES) ); }
@Test(expected = DataException.class) public void testFromConnectBadSchema() { converter.fromConnectData(TOPIC, Schema.INT32_SCHEMA, SAMPLE_BYTES); }
@Test(expected = DataException.class) public void testFromConnectInvalidValue() { converter.fromConnectData(TOPIC, Schema.BYTES_SCHEMA, 12); }
@Test public void testFromConnectNull() { assertNull(converter.fromConnectData(TOPIC, Schema.BYTES_SCHEMA, null)); } |
### Question:
KStreamTransformValues implements ProcessorSupplier<K, V> { @Override public Processor<K, V> get() { return new KStreamTransformValuesProcessor<>(valueTransformerSupplier.get()); } KStreamTransformValues(ValueTransformerSupplier<V, R> valueTransformerSupplier); @Override Processor<K, V> get(); }### Answer:
@Test public void testTransform() { KStreamBuilder builder = new KStreamBuilder(); ValueTransformerSupplier<Number, Integer> valueTransformerSupplier = new ValueTransformerSupplier<Number, Integer>() { public ValueTransformer<Number, Integer> get() { return new ValueTransformer<Number, Integer>() { private int total = 0; @Override public void init(ProcessorContext context) { } @Override public Integer transform(Number value) { total += value.intValue(); return total; } @Override public Integer punctuate(long timestamp) { return null; } @Override public void close() { } }; } }; final int[] expectedKeys = {1, 10, 100, 1000}; KStream<Integer, Integer> stream; MockProcessorSupplier<Integer, Integer> processor = new MockProcessorSupplier<>(); stream = builder.stream(intSerde, intSerde, topicName); stream.transformValues(valueTransformerSupplier).process(processor); driver = new KStreamTestDriver(builder); for (int expectedKey : expectedKeys) { driver.process(topicName, expectedKey, expectedKey * 10); } assertEquals(4, processor.processed.size()); String[] expected = {"1:10", "10:110", "100:1110", "1000:11110"}; for (int i = 0; i < expected.length; i++) { assertEquals(expected[i], processor.processed.get(i)); } }
@Test public void shouldNotAllowValueTransformerToCallInternalProcessorContextMethods() { final KStreamTransformValues<Integer, Integer, Integer> transformValue = new KStreamTransformValues<>(new ValueTransformerSupplier<Integer, Integer>() { @Override public ValueTransformer<Integer, Integer> get() { return new BadValueTransformer(); } }); final Processor transformValueProcessor = transformValue.get(); transformValueProcessor.init(null); try { transformValueProcessor.process(null, 0); fail("should not allow call to context.forward() within ValueTransformer"); } catch (final StreamsException e) { } try { transformValueProcessor.process(null, 1); fail("should not allow call to context.forward() within ValueTransformer"); } catch (final StreamsException e) { } try { transformValueProcessor.process(null, 2); fail("should not allow call to context.forward() within ValueTransformer"); } catch (final StreamsException e) { } try { transformValueProcessor.punctuate(0); fail("should not allow ValueTransformer#punctuate() to return a non-null value"); } catch (final StreamsException e) { } } |
### Question:
KStreamMapValues implements ProcessorSupplier<K, V> { @Override public Processor<K, V> get() { return new KStreamMapProcessor(); } KStreamMapValues(ValueMapper<V, V1> mapper); @Override Processor<K, V> get(); }### Answer:
@Test public void testMapValues() { KStreamBuilder builder = new KStreamBuilder(); ValueMapper<CharSequence, Integer> mapper = new ValueMapper<CharSequence, Integer>() { @Override public Integer apply(CharSequence value) { return value.length(); } }; final int[] expectedKeys = {1, 10, 100, 1000}; KStream<Integer, String> stream; MockProcessorSupplier<Integer, Integer> processor = new MockProcessorSupplier<>(); stream = builder.stream(intSerde, stringSerde, topicName); stream.mapValues(mapper).process(processor); driver = new KStreamTestDriver(builder); for (int expectedKey : expectedKeys) { driver.process(topicName, expectedKey, Integer.toString(expectedKey)); } assertEquals(4, processor.processed.size()); String[] expected = {"1:1", "10:2", "100:3", "1000:4"}; for (int i = 0; i < expected.length; i++) { assertEquals(expected[i], processor.processed.get(i)); } } |
### Question:
ByteArrayConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { return new SchemaAndValue(Schema.OPTIONAL_BYTES_SCHEMA, value); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }### Answer:
@Test public void testToConnect() { SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_BYTES); assertEquals(Schema.OPTIONAL_BYTES_SCHEMA, data.schema()); assertTrue(Arrays.equals(SAMPLE_BYTES, (byte[]) data.value())); }
@Test public void testToConnectNull() { SchemaAndValue data = converter.toConnectData(TOPIC, null); assertEquals(Schema.OPTIONAL_BYTES_SCHEMA, data.schema()); assertNull(data.value()); } |
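A pass-through round trip tying both directions together; the topic name is arbitrary and the converter needs no settings beyond the standard `configure()` call:

```java
import java.util.Collections;
import org.apache.kafka.connect.converters.ByteArrayConverter;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaAndValue;

public class ByteArrayConverterExample {
    public static void main(final String[] args) {
        final ByteArrayConverter converter = new ByteArrayConverter();
        converter.configure(Collections.<String, Object>emptyMap(), false);  // value converter
        final byte[] payload = "raw bytes".getBytes();
        // Serialization is a checked pass-through; deserialization tags the
        // bytes with the OPTIONAL_BYTES schema.
        final byte[] serialized = converter.fromConnectData("topic", Schema.BYTES_SCHEMA, payload);
        final SchemaAndValue connectData = converter.toConnectData("topic", serialized);
        System.out.println(connectData.schema().equals(Schema.OPTIONAL_BYTES_SCHEMA));  // true
    }
}
```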
### Question:
SessionWindows { public static SessionWindows with(final long inactivityGapMs) { if (inactivityGapMs <= 0) { throw new IllegalArgumentException("Gap time (inactivityGapMs) cannot be zero or negative."); } return new SessionWindows(inactivityGapMs); } private SessionWindows(final long gapMs); static SessionWindows with(final long inactivityGapMs); SessionWindows until(final long durationMs); long inactivityGap(); long maintainMs(); }### Answer:
@Test(expected = IllegalArgumentException.class) public void windowSizeMustNotBeNegative() { SessionWindows.with(-1); }
@Test(expected = IllegalArgumentException.class) public void windowSizeMustNotBeZero() { SessionWindows.with(0); } |
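A minimal sketch of the valid construction path, with illustrative durations:

```java
import java.util.concurrent.TimeUnit;
import org.apache.kafka.streams.kstream.SessionWindows;

public class SessionWindowsExample {
    public static void main(final String[] args) {
        // Sessions close after 5 minutes of inactivity; state is retained for 1 hour.
        final SessionWindows sessions = SessionWindows
                .with(TimeUnit.MINUTES.toMillis(5))
                .until(TimeUnit.HOURS.toMillis(1));
        System.out.println("gap=" + sessions.inactivityGap()
                + "ms, retention=" + sessions.maintainMs() + "ms");
    }
}
```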
### Question:
JoinWindows extends Windows<Window> { public static JoinWindows of(final long timeDifferenceMs) throws IllegalArgumentException { return new JoinWindows(timeDifferenceMs, timeDifferenceMs); } private JoinWindows(final long beforeMs, final long afterMs); static JoinWindows of(final long timeDifferenceMs); JoinWindows before(final long timeDifferenceMs); JoinWindows after(final long timeDifferenceMs); @Override Map<Long, Window> windowsFor(final long timestamp); @Override long size(); @Override JoinWindows until(final long durationMs); @Override long maintainMs(); @Override final boolean equals(final Object o); @Override int hashCode(); final long beforeMs; final long afterMs; }### Answer:
@Test(expected = IllegalArgumentException.class) public void timeDifferenceMustNotBeNegative() { JoinWindows.of(-1); } |
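The time difference can also be made asymmetric after construction; a sketch with illustrative bounds:

```java
import org.apache.kafka.streams.kstream.JoinWindows;

public class JoinWindowsExample {
    public static void main(final String[] args) {
        // Start symmetric at +/- 5s, then accept the other stream's records
        // up to 10s earlier but only 2s later.
        final JoinWindows windows = JoinWindows.of(5000L).before(10000L).after(2000L);
        System.out.println("before=" + windows.beforeMs + "ms, after=" + windows.afterMs + "ms");
    }
}
```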
### Question:
DefaultPartitionGrouper implements PartitionGrouper { public Map<TaskId, Set<TopicPartition>> partitionGroups(Map<Integer, Set<String>> topicGroups, Cluster metadata) { Map<TaskId, Set<TopicPartition>> groups = new HashMap<>(); for (Map.Entry<Integer, Set<String>> entry : topicGroups.entrySet()) { Integer topicGroupId = entry.getKey(); Set<String> topicGroup = entry.getValue(); int maxNumPartitions = maxNumPartitions(metadata, topicGroup); for (int partitionId = 0; partitionId < maxNumPartitions; partitionId++) { Set<TopicPartition> group = new HashSet<>(topicGroup.size()); for (String topic : topicGroup) { List<PartitionInfo> partitions = metadata.partitionsForTopic(topic); if (partitionId < partitions.size()) { group.add(new TopicPartition(topic, partitionId)); } } groups.put(new TaskId(topicGroupId, partitionId), Collections.unmodifiableSet(group)); } } return Collections.unmodifiableMap(groups); } Map<TaskId, Set<TopicPartition>> partitionGroups(Map<Integer, Set<String>> topicGroups, Cluster metadata); }### Answer:
@Test public void shouldComputeGroupingForTwoGroups() { final PartitionGrouper grouper = new DefaultPartitionGrouper(); final Map<TaskId, Set<TopicPartition>> expectedPartitionsForTask = new HashMap<>(); final Map<Integer, Set<String>> topicGroups = new HashMap<>(); int topicGroupId = 0; topicGroups.put(topicGroupId, mkSet("topic1")); expectedPartitionsForTask.put(new TaskId(topicGroupId, 0), mkSet(new TopicPartition("topic1", 0))); expectedPartitionsForTask.put(new TaskId(topicGroupId, 1), mkSet(new TopicPartition("topic1", 1))); expectedPartitionsForTask.put(new TaskId(topicGroupId, 2), mkSet(new TopicPartition("topic1", 2))); topicGroups.put(++topicGroupId, mkSet("topic2")); expectedPartitionsForTask.put(new TaskId(topicGroupId, 0), mkSet(new TopicPartition("topic2", 0))); expectedPartitionsForTask.put(new TaskId(topicGroupId, 1), mkSet(new TopicPartition("topic2", 1))); assertEquals(expectedPartitionsForTask, grouper.partitionGroups(topicGroups, metadata)); }
@Test public void shouldComputeGroupingForSingleGroupWithMultipleTopics() { final PartitionGrouper grouper = new DefaultPartitionGrouper(); final Map<TaskId, Set<TopicPartition>> expectedPartitionsForTask = new HashMap<>(); final Map<Integer, Set<String>> topicGroups = new HashMap<>(); final int topicGroupId = 0; topicGroups.put(topicGroupId, mkSet("topic1", "topic2")); expectedPartitionsForTask.put( new TaskId(topicGroupId, 0), mkSet(new TopicPartition("topic1", 0), new TopicPartition("topic2", 0))); expectedPartitionsForTask.put( new TaskId(topicGroupId, 1), mkSet(new TopicPartition("topic1", 1), new TopicPartition("topic2", 1))); expectedPartitionsForTask.put( new TaskId(topicGroupId, 2), mkSet(new TopicPartition("topic1", 2))); assertEquals(expectedPartitionsForTask, grouper.partitionGroups(topicGroups, metadata)); }
@Test public void shouldNotCreateAnyTasksBecauseOneTopicHasUnknownPartitions() { final PartitionGrouper grouper = new DefaultPartitionGrouper(); final Map<TaskId, Set<TopicPartition>> expectedPartitionsForTask = new HashMap<>(); final Map<Integer, Set<String>> topicGroups = new HashMap<>(); final int topicGroupId = 0; topicGroups.put(topicGroupId, mkSet("topic1", "unknownTopic", "topic2")); assertEquals(expectedPartitionsForTask, grouper.partitionGroups(topicGroups, metadata)); } |
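A self-contained sketch of the grouping rule these tests cover, one task per partition of the widest topic in each group; the single-node Cluster is fabricated purely for illustration:

```java
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.Node;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.streams.processor.DefaultPartitionGrouper;
import org.apache.kafka.streams.processor.TaskId;

public class PartitionGrouperExample {
    public static void main(final String[] args) {
        final Node node = new Node(0, "localhost", 9092);
        final Node[] replicas = {node};
        // Fabricated metadata: one topic with two partitions on a single broker.
        final Cluster metadata = new Cluster(
                "clusterId",
                Collections.singletonList(node),
                Arrays.asList(
                        new PartitionInfo("topic1", 0, node, replicas, replicas),
                        new PartitionInfo("topic1", 1, node, replicas, replicas)),
                Collections.<String>emptySet(),
                Collections.<String>emptySet());

        final Map<Integer, Set<String>> topicGroups =
                Collections.singletonMap(0, Collections.singleton("topic1"));
        final Map<TaskId, Set<TopicPartition>> tasks =
                new DefaultPartitionGrouper().partitionGroups(topicGroups, metadata);
        System.out.println(tasks);  // two tasks: 0_0 -> [topic1-0], 0_1 -> [topic1-1]
    }
}
```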
### Question:
WallclockTimestampExtractor implements TimestampExtractor { @Override public long extract(final ConsumerRecord<Object, Object> record, final long previousTimestamp) { return System.currentTimeMillis(); } @Override long extract(final ConsumerRecord<Object, Object> record, final long previousTimestamp); }### Answer:
@Test public void extractSystemTimestamp() { final TimestampExtractor extractor = new WallclockTimestampExtractor(); final long before = System.currentTimeMillis(); final long timestamp = extractor.extract(new ConsumerRecord<>("anyTopic", 0, 0, null, null), 42); final long after = System.currentTimeMillis(); assertThat(timestamp, is(new InBetween(before, after))); } |
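A sketch showing that the record content is irrelevant; the extractor simply returns the current wall-clock time:

```java
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.streams.processor.TimestampExtractor;
import org.apache.kafka.streams.processor.WallclockTimestampExtractor;

public class WallclockExample {
    public static void main(final String[] args) {
        final TimestampExtractor extractor = new WallclockTimestampExtractor();
        // Topic, key, value, and the previous timestamp are all ignored.
        final long ts = extractor.extract(
                new ConsumerRecord<Object, Object>("anyTopic", 0, 0L, null, null), -1L);
        System.out.println(ts + " ~ " + System.currentTimeMillis());
    }
}
```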
### Question:
StateRestorer { void restore(final byte[] key, final byte[] value) { stateRestoreCallback.restore(key, value); } StateRestorer(final TopicPartition partition,
final StateRestoreCallback stateRestoreCallback,
final Long checkpoint,
final long offsetLimit,
final boolean persistent); TopicPartition partition(); }### Answer:
@Test public void shouldCallRestoreOnRestoreCallback() throws Exception { restorer.restore(new byte[0], new byte[0]); assertThat(callback.restored.size(), equalTo(1)); } |
### Question:
StateRestorer { boolean hasCompleted(final long recordOffset, final long endOffset) { return endOffset == 0 || recordOffset >= readTo(endOffset); } StateRestorer(final TopicPartition partition,
final StateRestoreCallback stateRestoreCallback,
final Long checkpoint,
final long offsetLimit,
final boolean persistent); TopicPartition partition(); }### Answer:
@Test public void shouldBeCompletedIfRecordOffsetGreaterThanEndOffset() throws Exception { assertTrue(restorer.hasCompleted(11, 10)); }
@Test public void shouldBeCompletedIfRecordOffsetGreaterThanOffsetLimit() throws Exception { assertTrue(restorer.hasCompleted(51, 100)); }
@Test public void shouldBeCompletedIfEndOffsetAndRecordOffsetAreZero() throws Exception { assertTrue(restorer.hasCompleted(0, 0)); }
@Test public void shouldBeCompletedIfOffsetAndOffsetLimitAreZero() throws Exception { final StateRestorer restorer = new StateRestorer(new TopicPartition("topic", 1), callback, null, 0, true); assertTrue(restorer.hasCompleted(0, 10)); } |
### Question:
AbstractProcessorContext implements InternalProcessorContext { @Override public void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback) { if (initialized) { throw new IllegalStateException("Can only create state stores during initialization."); } Objects.requireNonNull(store, "store must not be null"); stateManager.register(store, loggingEnabled, stateRestoreCallback); } AbstractProcessorContext(final TaskId taskId,
final String applicationId,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateManager stateManager,
final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }### Answer:
@Test public void shouldNotThrowIllegalStateExceptionOnRegisterWhenContextIsNotInitialized() throws Exception { context.register(stateStore, false, null); }
@Test(expected = NullPointerException.class) public void shouldThrowNullPointerOnRegisterIfStateStoreIsNull() { context.register(null, false, null); } |
### Question:
AbstractProcessorContext implements InternalProcessorContext { @Override public String topic() { if (recordContext == null) { throw new IllegalStateException("This should not happen as topic() should only be called while a record is processed"); } final String topic = recordContext.topic(); if (topic.equals(NONEXIST_TOPIC)) { return null; } return topic; } AbstractProcessorContext(final TaskId taskId,
final String applicationId,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateManager stateManager,
final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }### Answer:
@Test public void shouldReturnTopicFromRecordContext() throws Exception { assertThat(context.topic(), equalTo(recordContext.topic())); } |
### Question:
AbstractProcessorContext implements InternalProcessorContext { @Override public int partition() { if (recordContext == null) { throw new IllegalStateException("This should not happen as partition() should only be called while a record is processed"); } return recordContext.partition(); } AbstractProcessorContext(final TaskId taskId,
final String applicationId,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateManager stateManager,
final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }### Answer:
@Test public void shouldReturnPartitionFromRecordContext() throws Exception { assertThat(context.partition(), equalTo(recordContext.partition())); } |
### Question:
AbstractProcessorContext implements InternalProcessorContext { @Override public long offset() { if (recordContext == null) { throw new IllegalStateException("This should not happen as offset() should only be called while a record is processed"); } return recordContext.offset(); } AbstractProcessorContext(final TaskId taskId,
final String applicationId,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateManager stateManager,
final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }### Answer:
@Test public void shouldReturnOffsetFromRecordContext() throws Exception { assertThat(context.offset(), equalTo(recordContext.offset())); } |
### Question:
AbstractProcessorContext implements InternalProcessorContext { @Override public long timestamp() { if (recordContext == null) { throw new IllegalStateException("This should not happen as timestamp() should only be called while a record is processed"); } return recordContext.timestamp(); } AbstractProcessorContext(final TaskId taskId,
final String applicationId,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateManager stateManager,
final ThreadCache cache); @Override String applicationId(); @Override TaskId taskId(); @Override Serde<?> keySerde(); @Override Serde<?> valueSerde(); @Override File stateDir(); @Override StreamsMetrics metrics(); @Override void register(final StateStore store, final boolean loggingEnabled, final StateRestoreCallback stateRestoreCallback); @Override String topic(); @Override int partition(); @Override long offset(); @Override long timestamp(); @Override Map<String, Object> appConfigs(); @Override Map<String, Object> appConfigsWithPrefix(String prefix); @Override void setRecordContext(final RecordContext recordContext); @Override RecordContext recordContext(); @Override void setCurrentNode(final ProcessorNode currentNode); @Override ProcessorNode currentNode(); @Override ThreadCache getCache(); @Override void initialized(); }### Answer:
@Test public void shouldReturnTimestampFromRecordContext() throws Exception { assertThat(context.timestamp(), equalTo(recordContext.timestamp())); } |
### Question:
TopicAdmin implements AutoCloseable { public boolean createTopic(NewTopic topic) { if (topic == null) return false; Set<String> newTopicNames = createTopics(topic); return newTopicNames.contains(topic.name()); } TopicAdmin(Map<String, Object> adminConfig); TopicAdmin(Map<String, Object> adminConfig, AdminClient adminClient); static NewTopicBuilder defineTopic(String topicName); boolean createTopic(NewTopic topic); Set<String> createTopics(NewTopic... topics); @Override void close(); }### Answer:
@Test public void shouldReturnFalseWhenSuppliedNullTopicDescription() { Cluster cluster = createCluster(1); try (MockKafkaAdminClientEnv env = new MockKafkaAdminClientEnv(cluster)) { env.kafkaClient().setNode(cluster.controller()); env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareMetadataUpdate(env.cluster(), Collections.<String>emptySet()); TopicAdmin admin = new TopicAdmin(null, env.adminClient()); boolean created = admin.createTopic(null); assertFalse(created); } } |
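A sketch of the builder-plus-`createTopic` flow on the happy path; the broker address and topic settings are assumptions:

```java
import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.connect.util.TopicAdmin;

public class TopicAdminExample {
    public static void main(final String[] args) {
        final Map<String, Object> adminConfig =
                Collections.<String, Object>singletonMap("bootstrap.servers", "localhost:9092");
        // Compacted topic definition via the fluent NewTopicBuilder.
        final NewTopic offsetsTopic = TopicAdmin.defineTopic("connect-offsets")
                .partitions(25)
                .replicationFactor((short) 3)
                .compacted()
                .build();
        try (TopicAdmin admin = new TopicAdmin(adminConfig)) {
            // true only if the topic did not exist and was actually created
            System.out.println("created: " + admin.createTopic(offsetsTopic));
        }
    }
}
```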
### Question:
StandbyTask extends AbstractTask { Map<TopicPartition, Long> checkpointedOffsets() { return checkpointedOffsets; } StandbyTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateDirectory stateDirectory); @Override void resume(); @Override void commit(); @Override void suspend(); @Override void close(final boolean clean); List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition,
final List<ConsumerRecord<byte[], byte[]>> records); }### Answer:
@Test public void testStorePartitions() throws Exception { StreamsConfig config = createConfig(baseDir); StandbyTask task = new StandbyTask(taskId, applicationId, topicPartitions, topology, consumer, changelogReader, config, null, stateDirectory); assertEquals(Utils.mkSet(partition2), new HashSet<>(task.checkpointedOffsets().keySet())); } |
### Question:
StandbyTask extends AbstractTask { public List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition, final List<ConsumerRecord<byte[], byte[]>> records) { log.debug("{} Updating standby replicas of its state store for partition [{}]", logPrefix, partition); return stateMgr.updateStandbyStates(partition, records); } StandbyTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateDirectory stateDirectory); @Override void resume(); @Override void commit(); @Override void suspend(); @Override void close(final boolean clean); List<ConsumerRecord<byte[], byte[]>> update(final TopicPartition partition,
final List<ConsumerRecord<byte[], byte[]>> records); }### Answer:
@SuppressWarnings("unchecked") @Test public void testUpdate() throws Exception { StreamsConfig config = createConfig(baseDir); StandbyTask task = new StandbyTask(taskId, applicationId, topicPartitions, topology, consumer, changelogReader, config, null, stateDirectory); restoreStateConsumer.assign(new ArrayList<>(task.checkpointedOffsets().keySet())); for (ConsumerRecord<Integer, Integer> record : Arrays.asList( new ConsumerRecord<>(partition2.topic(), partition2.partition(), 10, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 1, 100), new ConsumerRecord<>(partition2.topic(), partition2.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 2, 100), new ConsumerRecord<>(partition2.topic(), partition2.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, 3, 100))) { restoreStateConsumer.bufferRecord(record); } for (Map.Entry<TopicPartition, Long> entry : task.checkpointedOffsets().entrySet()) { TopicPartition partition = entry.getKey(); long offset = entry.getValue(); if (offset >= 0) { restoreStateConsumer.seek(partition, offset); } else { restoreStateConsumer.seekToBeginning(singleton(partition)); } } task.update(partition2, restoreStateConsumer.poll(100).records(partition2)); StandbyContextImpl context = (StandbyContextImpl) task.context(); MockStateStoreSupplier.MockStateStore store1 = (MockStateStoreSupplier.MockStateStore) context.getStateMgr().getStore(storeName1); MockStateStoreSupplier.MockStateStore store2 = (MockStateStoreSupplier.MockStateStore) context.getStateMgr().getStore(storeName2); assertEquals(Collections.emptyList(), store1.keys); assertEquals(Utils.mkList(1, 2, 3), store2.keys); task.closeStateManager(true); File taskDir = stateDirectory.directoryForTask(taskId); OffsetCheckpoint checkpoint = new OffsetCheckpoint(new File(taskDir, ProcessorStateManager.CHECKPOINT_FILE_NAME)); Map<TopicPartition, Long> offsets = checkpoint.read(); assertEquals(1, offsets.size()); assertEquals(new Long(30L + 1L), offsets.get(partition2)); } |
### Question:
StateDirectory { File directoryForTask(final TaskId taskId) { final File taskDir = new File(stateDir, taskId.toString()); if (!taskDir.exists() && !taskDir.mkdir()) { throw new ProcessorStateException(String.format("task directory [%s] doesn't exist and couldn't be created", taskDir.getPath())); } return taskDir; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }### Answer:
@Test public void shouldCreateTaskStateDirectory() throws Exception { final TaskId taskId = new TaskId(0, 0); final File taskDirectory = directory.directoryForTask(taskId); assertTrue(taskDirectory.exists()); assertTrue(taskDirectory.isDirectory()); }
@Test(expected = ProcessorStateException.class) public void shouldThrowProcessorStateException() throws Exception { final TaskId taskId = new TaskId(0, 0); Utils.delete(stateDir); directory.directoryForTask(taskId); }
@Test public void shouldCreateDirectoriesIfParentDoesntExist() throws Exception { final File tempDir = TestUtils.tempDirectory(); final File stateDir = new File(new File(tempDir, "foo"), "state-dir"); final StateDirectory stateDirectory = new StateDirectory(applicationId, stateDir.getPath(), time); final File taskDir = stateDirectory.directoryForTask(new TaskId(0, 0)); assertTrue(stateDir.exists()); assertTrue(taskDir.exists()); } |
### Question:
StateDirectory { boolean lock(final TaskId taskId, int retry) throws IOException { final File lockFile; if (locks.containsKey(taskId)) { log.trace("{} Found cached state dir lock for task {}", logPrefix, taskId); return true; } try { lockFile = new File(directoryForTask(taskId), LOCK_FILE_NAME); } catch (ProcessorStateException e) { return false; } final FileChannel channel; try { channel = getOrCreateFileChannel(taskId, lockFile.toPath()); } catch (NoSuchFileException e) { return false; } final FileLock lock = tryLock(retry, channel); if (lock != null) { locks.put(taskId, lock); log.debug("{} Acquired state dir lock for task {}", logPrefix, taskId); } return lock != null; } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }### Answer:
@Test public void shouldNotLockDeletedDirectory() throws Exception { final TaskId taskId = new TaskId(0, 0); Utils.delete(stateDir); assertFalse(directory.lock(taskId, 0)); } |
### Question:
StateDirectory { public void cleanRemovedTasks(final long cleanupDelayMs) { final File[] taskDirs = listTaskDirectories(); if (taskDirs == null || taskDirs.length == 0) { return; } for (File taskDir : taskDirs) { final String dirName = taskDir.getName(); TaskId id = TaskId.parse(dirName); if (!locks.containsKey(id)) { try { if (lock(id, 0)) { if (time.milliseconds() > taskDir.lastModified() + cleanupDelayMs) { log.info("{} Deleting obsolete state directory {} for task {} as cleanup delay of {} ms has passed", logPrefix, dirName, id, cleanupDelayMs); Utils.delete(taskDir); } } } catch (OverlappingFileLockException e) { } catch (IOException e) { log.error("{} Failed to lock the state directory due to an unexpected exception", logPrefix, e); } finally { try { unlock(id); } catch (IOException e) { log.error("{} Failed to release the state directory lock", logPrefix); } } } } } StateDirectory(final String applicationId, final String stateDirConfig, final Time time); StateDirectory(final String applicationId, final String threadId, final String stateDirConfig, final Time time); void cleanRemovedTasks(final long cleanupDelayMs); }### Answer:
@Test public void shouldNotRemoveNonTaskDirectoriesAndFiles() throws Exception { final File otherDir = TestUtils.tempDirectory(stateDir.toPath(), "foo"); directory.cleanRemovedTasks(0); assertTrue(otherDir.exists()); } |
### Question:
StreamThread extends Thread { String threadClientId() { return threadClientId; } StreamThread(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier,
final String applicationId,
final String clientId,
final UUID processId,
final Metrics metrics,
final Time time,
final StreamsMetadataState streamsMetadataState,
final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }### Answer:
@Test public void testMetrics() throws Exception { final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0); final String defaultGroupName = "stream-metrics"; final String defaultPrefix = "thread." + thread.threadClientId(); final Map<String, String> defaultTags = Collections.singletonMap("client-id", thread.threadClientId()); assertNotNull(metrics.getSensor(defaultPrefix + ".commit-latency")); assertNotNull(metrics.getSensor(defaultPrefix + ".poll-latency")); assertNotNull(metrics.getSensor(defaultPrefix + ".process-latency")); assertNotNull(metrics.getSensor(defaultPrefix + ".punctuate-latency")); assertNotNull(metrics.getSensor(defaultPrefix + ".task-created")); assertNotNull(metrics.getSensor(defaultPrefix + ".task-closed")); assertNotNull(metrics.getSensor(defaultPrefix + ".skipped-records")); assertNotNull(metrics.metrics().get(metrics.metricName("commit-latency-avg", defaultGroupName, "The average commit time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("commit-latency-max", defaultGroupName, "The maximum commit time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("commit-rate", defaultGroupName, "The average per-second number of commit calls", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("poll-latency-avg", defaultGroupName, "The average poll time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("poll-latency-max", defaultGroupName, "The maximum poll time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("poll-rate", defaultGroupName, "The average per-second number of record-poll calls", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("process-latency-avg", defaultGroupName, "The average process time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("process-latency-max", defaultGroupName, "The maximum process time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("process-rate", defaultGroupName, "The average per-second number of process calls", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-latency-avg", defaultGroupName, "The average punctuate time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-latency-max", defaultGroupName, "The maximum punctuate time in ms", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("punctuate-rate", defaultGroupName, "The average per-second number of punctuate calls", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("task-created-rate", defaultGroupName, "The average per-second number of newly created tasks", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("task-closed-rate", defaultGroupName, "The average per-second number of closed tasks", defaultTags))); assertNotNull(metrics.metrics().get(metrics.metricName("skipped-records-rate", defaultGroupName, "The average per-second number of skipped records.", defaultTags))); } |
### Question:
StreamThread extends Thread { protected void maybeCommit(final long now) { if (commitTimeMs >= 0 && lastCommitMs + commitTimeMs < now) { log.debug("{} Committing all active tasks {} and standby tasks {} because the commit interval {}ms has elapsed by {}ms", logPrefix, activeTasks.keySet(), standbyTasks.keySet(), commitTimeMs, now - lastCommitMs); commitAll(); lastCommitMs = now; processStandbyRecords = true; } } StreamThread(final TopologyBuilder builder,
final StreamsConfig config,
final KafkaClientSupplier clientSupplier,
final String applicationId,
final String clientId,
final UUID processId,
final Metrics metrics,
final Time time,
final StreamsMetadataState streamsMetadataState,
final long cacheSizeBytes); @Override void run(); synchronized void close(); synchronized boolean isInitialized(); synchronized boolean stillRunning(); Map<TaskId, StreamTask> tasks(); Set<TaskId> prevActiveTasks(); Set<TaskId> cachedTasks(); void setStateListener(final StreamThread.StateListener listener); synchronized State state(); @Override String toString(); String toString(final String indent); final String applicationId; final String clientId; final UUID processId; }### Answer:
@Test public void testMaybeCommit() throws Exception { final File baseDir = Files.createTempDirectory("test").toFile(); try { final long commitInterval = 1000L; final Properties props = configProps(false); props.setProperty(StreamsConfig.STATE_DIR_CONFIG, baseDir.getCanonicalPath()); props.setProperty(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, Long.toString(commitInterval)); final StreamsConfig config = new StreamsConfig(props); builder.addSource("source1", "topic1"); final StreamThread thread = new StreamThread( builder, config, clientSupplier, applicationId, clientId, processId, metrics, mockTime, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) { @Override public void maybeCommit(final long now) { super.maybeCommit(now); } @Override protected StreamTask createStreamTask(final TaskId id, final Collection<TopicPartition> partitionsForTask) { final ProcessorTopology topology = builder.build(id.topicGroupId); return new TestStreamTask( id, applicationId, partitionsForTask, topology, consumer, clientSupplier.getProducer(new HashMap<String, Object>()), restoreConsumer, config, new MockStreamsMetrics(new Metrics()), stateDirectory); } }; initPartitionGrouper(config, thread, clientSupplier); final ConsumerRebalanceListener rebalanceListener = thread.rebalanceListener; final List<TopicPartition> revokedPartitions; final List<TopicPartition> assignedPartitions; revokedPartitions = Collections.emptyList(); assignedPartitions = Arrays.asList(t1p1, t1p2); rebalanceListener.onPartitionsRevoked(revokedPartitions); rebalanceListener.onPartitionsAssigned(assignedPartitions); assertEquals(2, thread.tasks().size()); mockTime.sleep(commitInterval - 10L); thread.maybeCommit(mockTime.milliseconds()); for (final StreamTask task : thread.tasks().values()) { assertFalse(((TestStreamTask) task).committed); } mockTime.sleep(11L); thread.maybeCommit(mockTime.milliseconds()); for (final StreamTask task : thread.tasks().values()) { assertTrue(((TestStreamTask) task).committed); ((TestStreamTask) task).committed = false; } mockTime.sleep(commitInterval - 10L); thread.maybeCommit(mockTime.milliseconds()); for (final StreamTask task : thread.tasks().values()) { assertFalse(((TestStreamTask) task).committed); } mockTime.sleep(11L); thread.maybeCommit(mockTime.milliseconds()); for (final StreamTask task : thread.tasks().values()) { assertTrue(((TestStreamTask) task).committed); ((TestStreamTask) task).committed = false; } } finally { Utils.delete(baseDir); } } |
### Question:
StreamsMetricsImpl implements StreamsMetrics { @Override public void removeSensor(Sensor sensor) { Sensor parent = null; Objects.requireNonNull(sensor, "Sensor is null"); metrics.removeSensor(sensor.name()); parent = parentSensors.get(sensor); if (parent != null) { metrics.removeSensor(parent.name()); } } StreamsMetricsImpl(Metrics metrics, String groupName, Map<String, String> tags); Metrics registry(); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel); @Override Sensor addSensor(String name, Sensor.RecordingLevel recordingLevel, Sensor... parents); @Override Map<MetricName, ? extends Metric> metrics(); @Override void recordLatency(Sensor sensor, long startNs, long endNs); @Override void recordThroughput(Sensor sensor, long value); @Override Sensor addLatencyAndThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); @Override Sensor addThroughputSensor(String scopeName, String entityName, String operationName, Sensor.RecordingLevel recordingLevel, String... tags); void measureLatencyNs(final Time time, final Runnable action, final Sensor sensor); @Override void removeSensor(Sensor sensor); }### Answer:
@Test(expected = NullPointerException.class) public void testRemoveNullSensor() { String groupName = "doesNotMatter"; Map<String, String> tags = new HashMap<>(); StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags); streamsMetrics.removeSensor(null); }
@Test public void testRemoveSensor() { String groupName = "doesNotMatter"; String sensorName = "sensor1"; String scope = "scope"; String entity = "entity"; String operation = "put"; Map<String, String> tags = new HashMap<>(); StreamsMetricsImpl streamsMetrics = new StreamsMetricsImpl(new Metrics(), groupName, tags); Sensor sensor1 = streamsMetrics.addSensor(sensorName, Sensor.RecordingLevel.DEBUG); streamsMetrics.removeSensor(sensor1); Sensor sensor1a = streamsMetrics.addSensor(sensorName, Sensor.RecordingLevel.DEBUG, sensor1); streamsMetrics.removeSensor(sensor1a); Sensor sensor2 = streamsMetrics.addLatencyAndThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG); streamsMetrics.removeSensor(sensor2); Sensor sensor3 = streamsMetrics.addThroughputSensor(scope, entity, operation, Sensor.RecordingLevel.DEBUG); streamsMetrics.removeSensor(sensor3); } |
### Question:
GlobalStateManagerImpl implements GlobalStateManager { @Override public Set<String> initialize(final InternalProcessorContext processorContext) { try { if (!stateDirectory.lockGlobalState(MAX_LOCK_ATTEMPTS)) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } } catch (IOException e) { throw new LockException(String.format("Failed to lock the global state directory: %s", baseDir)); } try { this.checkpointableOffsets.putAll(checkpoint.read()); } catch (IOException e) { try { stateDirectory.unlockGlobalState(); } catch (IOException e1) { log.error("failed to unlock the global state directory", e); } throw new StreamsException("Failed to read checkpoints for global state stores", e); } final List<StateStore> stateStores = topology.globalStateStores(); for (final StateStore stateStore : stateStores) { globalStoreNames.add(stateStore.name()); stateStore.init(processorContext, stateStore); } return Collections.unmodifiableSet(globalStoreNames); } GlobalStateManagerImpl(final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final StateDirectory stateDirectory); @Override Set<String> initialize(final InternalProcessorContext processorContext); @Override StateStore getGlobalStore(final String name); @Override StateStore getStore(final String name); File baseDir(); void register(final StateStore store,
final boolean ignored,
final StateRestoreCallback stateRestoreCallback); @Override void flush(); @Override void close(final Map<TopicPartition, Long> offsets); @Override void checkpoint(final Map<TopicPartition, Long> offsets); @Override Map<TopicPartition, Long> checkpointed(); }### Answer:
@Test public void shouldLockGlobalStateDirectory() throws Exception { stateManager.initialize(context); assertTrue(new File(stateDirectory.globalStateDir(), ".lock").exists()); }
@Test(expected = LockException.class) public void shouldThrowLockExceptionIfCantGetLock() throws Exception { final StateDirectory stateDir = new StateDirectory("appId", stateDirPath, time); try { stateDir.lockGlobalState(1); stateManager.initialize(context); } finally { stateDir.unlockGlobalState(); } }
@Test public void shouldNotDeleteCheckpointFileAfterLoaded() throws Exception { writeCheckpoint(); stateManager.initialize(context); assertTrue(checkpointFile.exists()); }
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfFailedToReadCheckpointedOffsets() throws Exception { writeCorruptCheckpoint(); stateManager.initialize(context); }
@Test public void shouldInitializeStateStores() throws Exception { stateManager.initialize(context); assertTrue(store1.initialized); assertTrue(store2.initialized); }
@Test public void shouldReturnInitializedStoreNames() throws Exception { final Set<String> storeNames = stateManager.initialize(context); assertEquals(Utils.mkSet(store1.name(), store2.name()), storeNames); }
@Test public void shouldReleaseLockIfExceptionWhenLoadingCheckpoints() throws Exception { writeCorruptCheckpoint(); try { stateManager.initialize(context); } catch (StreamsException e) { } final StateDirectory stateDir = new StateDirectory("appId", stateDirPath, new MockTime()); try { assertTrue(stateDir.lockGlobalState(1)); } finally { stateDir.unlockGlobalState(); } }
@Test public void shouldThrowLockExceptionIfIOExceptionCaughtWhenTryingToLockStateDir() throws Exception { stateManager = new GlobalStateManagerImpl(topology, consumer, new StateDirectory("appId", stateDirPath, time) { @Override public boolean lockGlobalState(final int retry) throws IOException { throw new IOException("KABOOM!"); } }); try { stateManager.initialize(context); fail("Should have thrown LockException"); } catch (final LockException e) { } } |
### Question:
StreamTask extends AbstractTask implements Punctuator { boolean maybePunctuate() { final long timestamp = partitionGroup.timestamp(); if (timestamp == TimestampTracker.NOT_KNOWN) { return false; } else { return punctuationQueue.mayPunctuate(timestamp, this); } } StreamTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateDirectory stateDirectory,
final ThreadCache cache,
final Time time,
final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }### Answer:
@SuppressWarnings("unchecked") @Test public void testMaybePunctuate() throws Exception { task.addRecords(partition1, records( new ConsumerRecord<>(partition1.topic(), partition1.partition(), 20, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue), new ConsumerRecord<>(partition1.topic(), partition1.partition(), 30, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue), new ConsumerRecord<>(partition1.topic(), partition1.partition(), 40, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue) )); task.addRecords(partition2, records( new ConsumerRecord<>(partition2.topic(), partition2.partition(), 25, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue), new ConsumerRecord<>(partition2.topic(), partition2.partition(), 35, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue), new ConsumerRecord<>(partition2.topic(), partition2.partition(), 45, 0L, TimestampType.CREATE_TIME, 0L, 0, 0, recordKey, recordValue) )); assertTrue(task.maybePunctuate()); assertTrue(task.process()); assertEquals(5, task.numBuffered()); assertEquals(1, source1.numReceived); assertEquals(0, source2.numReceived); assertFalse(task.maybePunctuate()); assertTrue(task.process()); assertEquals(4, task.numBuffered()); assertEquals(1, source1.numReceived); assertEquals(1, source2.numReceived); assertTrue(task.maybePunctuate()); assertTrue(task.process()); assertEquals(3, task.numBuffered()); assertEquals(2, source1.numReceived); assertEquals(1, source2.numReceived); assertFalse(task.maybePunctuate()); assertTrue(task.process()); assertEquals(2, task.numBuffered()); assertEquals(2, source1.numReceived); assertEquals(2, source2.numReceived); assertTrue(task.maybePunctuate()); assertTrue(task.process()); assertEquals(1, task.numBuffered()); assertEquals(3, source1.numReceived); assertEquals(2, source2.numReceived); assertFalse(task.maybePunctuate()); assertTrue(task.process()); assertEquals(0, task.numBuffered()); assertEquals(3, source1.numReceived); assertEquals(3, source2.numReceived); assertFalse(task.process()); assertFalse(task.maybePunctuate()); processor.supplier.checkAndClearPunctuateResult(20L, 30L, 40L); } |
### Question:
StreamTask extends AbstractTask implements Punctuator { @Override protected void flushState() { log.trace("{} Flushing state and producer", logPrefix); super.flushState(); recordCollector.flush(); } StreamTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateDirectory stateDirectory,
final ThreadCache cache,
final Time time,
final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }### Answer:
@Test public void shouldFlushRecordCollectorOnFlushState() throws Exception { final AtomicBoolean flushed = new AtomicBoolean(false); final StreamsMetrics streamsMetrics = new MockStreamsMetrics(new Metrics()); final StreamTask streamTask = new StreamTask(taskId00, "appId", partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, null, time, producer) { @Override RecordCollector createRecordCollector() { return new NoOpRecordCollector() { @Override public void flush() { flushed.set(true); } }; } }; streamTask.flushState(); assertTrue(flushed.get()); } |
### Question:
StreamTask extends AbstractTask implements Punctuator { @Override public void punctuate(final ProcessorNode node, final long timestamp) { if (processorContext.currentNode() != null) { throw new IllegalStateException(String.format("%s Current node is not null", logPrefix)); } updateProcessorContext(new StampedRecord(DUMMY_RECORD, timestamp), node); log.trace("{} Punctuating processor {} with timestamp {}", logPrefix, node.name(), timestamp); try { node.punctuate(timestamp); } catch (final KafkaException e) { throw new StreamsException(String.format("%s Exception caught while punctuating processor '%s'", logPrefix, node.name()), e); } finally { processorContext.setCurrentNode(null); } } StreamTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateDirectory stateDirectory,
final ThreadCache cache,
final Time time,
final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }### Answer:
@Test public void shouldCallPunctuateOnPassedInProcessorNode() throws Exception { task.punctuate(processor, 5); assertThat(processor.punctuatedAt, equalTo(5L)); task.punctuate(processor, 10); assertThat(processor.punctuatedAt, equalTo(10L)); } |
### Question:
StreamTask extends AbstractTask implements Punctuator { public void schedule(final long interval) { if (processorContext.currentNode() == null) { throw new IllegalStateException(String.format("%s Current node is null", logPrefix)); } punctuationQueue.schedule(new PunctuationSchedule(processorContext.currentNode(), interval)); } StreamTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateDirectory stateDirectory,
final ThreadCache cache,
final Time time,
final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }### Answer:
@Test(expected = IllegalStateException.class) public void shouldThrowIllegalStateExceptionOnScheduleIfCurrentNodeIsNull() throws Exception { task.schedule(1); } |
### Question:
StreamTask extends AbstractTask implements Punctuator { @Override public void close(boolean clean) { log.debug("{} Closing", logPrefix); RuntimeException firstException = null; try { suspend(clean); } catch (final RuntimeException e) { clean = false; firstException = e; log.error("{} Could not close task: ", logPrefix, e); } try { closeStateManager(clean); } catch (final RuntimeException e) { clean = false; if (firstException == null) { firstException = e; } log.error("{} Could not close state manager: ", logPrefix, e); } try { partitionGroup.close(); metrics.removeAllSensors(); } finally { if (eosEnabled) { if (!clean) { try { producer.abortTransaction(); transactionInFlight = false; } catch (final ProducerFencedException e) { } } try { recordCollector.close(); } catch (final Throwable e) { log.error("{} Failed to close producer: ", logPrefix, e); } } } if (firstException != null) { throw firstException; } } StreamTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateDirectory stateDirectory,
final ThreadCache cache,
final Time time,
final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }### Answer:
@SuppressWarnings("unchecked") @Test public void shouldThrowExceptionIfAnyExceptionsRaisedDuringCloseButStillCloseAllProcessorNodesTopology() throws Exception { task.close(true); task = createTaskThatThrowsExceptionOnClose(); try { task.close(true); fail("should have thrown runtime exception"); } catch (final RuntimeException e) { task = null; } assertTrue(processor.closed); assertTrue(source1.closed); assertTrue(source2.closed); }
@Test public void shouldAbortTransactionOnDirtyClosedIfEosEnabled() throws Exception { final MockProducer producer = new MockProducer(); task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, eosConfig, streamsMetrics, stateDirectory, null, time, producer); task.close(false); task = null; assertTrue(producer.transactionAborted()); }
@Test public void shouldNotAbortTransactionOnDirtyClosedIfEosDisabled() throws Exception { final MockProducer producer = new MockProducer(); task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, config, streamsMetrics, stateDirectory, null, time, producer); task.close(false); assertFalse(producer.transactionAborted()); }
@SuppressWarnings("unchecked") @Test public void shouldCloseProducerOnCloseWhenEosEnabled() throws Exception { final MockProducer producer = new MockProducer(); task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, eosConfig, streamsMetrics, stateDirectory, null, time, producer); task.close(true); task = null; assertTrue(producer.closed()); } |
### Question:
StreamTask extends AbstractTask implements Punctuator { @Override public void suspend() { suspend(true); } StreamTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final StreamsConfig config,
final StreamsMetrics metrics,
final StateDirectory stateDirectory,
final ThreadCache cache,
final Time time,
final Producer<byte[], byte[]> producer); @Override void resume(); @SuppressWarnings("unchecked") boolean process(); @Override void punctuate(final ProcessorNode node, final long timestamp); @Override void commit(); @Override void suspend(); @Override void close(boolean clean); @SuppressWarnings("unchecked") int addRecords(final TopicPartition partition, final Iterable<ConsumerRecord<byte[], byte[]>> records); void schedule(final long interval); }### Answer:
@Test public void shouldCommitTransactionOnSuspendEvenIfTransactionIsEmptyIfEosEnabled() throws Exception { final MockProducer producer = new MockProducer(); task = new StreamTask(taskId00, applicationId, partitions, topology, consumer, changelogReader, eosConfig, streamsMetrics, stateDirectory, null, time, producer); task.suspend(); assertTrue(producer.transactionCommitted()); assertFalse(producer.transactionInFlight()); } |
### Question:
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName) { Objects.requireNonNull(storeName, "storeName cannot be null"); if (!isInitialized()) { return Collections.emptyList(); } if (globalStores.contains(storeName)) { return allMetadata; } final List<String> sourceTopics = builder.stateStoreNameToSourceTopics().get(storeName); if (sourceTopics == null) { return Collections.emptyList(); } final ArrayList<StreamsMetadata> results = new ArrayList<>(); for (StreamsMetadata metadata : allMetadata) { if (metadata.stateStoreNames().contains(storeName)) { results.add(metadata); } } return results; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName,
final K key,
final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }### Answer:
@Test public void shouldNotThrowNPEWhenOnChangeNotCalled() throws Exception { new StreamsMetadataState(builder, hostOne).getAllMetadataForStore("store"); }
@Test public void shouldGetInstancesForStoreName() throws Exception { final StreamsMetadata one = new StreamsMetadata(hostOne, Utils.mkSet(globalTable, "table-one", "table-two", "merged-table"), Utils.mkSet(topic1P0, topic2P1, topic4P0)); final StreamsMetadata two = new StreamsMetadata(hostTwo, Utils.mkSet(globalTable, "table-two", "table-one", "merged-table"), Utils.mkSet(topic2P0, topic1P1)); final Collection<StreamsMetadata> actual = discovery.getAllMetadataForStore("table-one"); assertEquals(2, actual.size()); assertTrue("expected " + actual + " to contain " + one, actual.contains(one)); assertTrue("expected " + actual + " to contain " + two, actual.contains(two)); }
@Test(expected = NullPointerException.class) public void shouldThrowIfStoreNameIsNullOnGetAllInstancesWithStore() throws Exception { discovery.getAllMetadataForStore(null); }
@Test public void shouldReturnEmptyCollectionOnGetAllInstancesWithStoreWhenStoreDoesntExist() throws Exception { final Collection<StreamsMetadata> actual = discovery.getAllMetadataForStore("not-a-store"); assertTrue(actual.isEmpty()); }
@Test public void shouldHaveGlobalStoreInAllMetadata() throws Exception { final Collection<StreamsMetadata> metadata = discovery.getAllMetadataForStore(globalTable); assertEquals(3, metadata.size()); for (StreamsMetadata streamsMetadata : metadata) { assertTrue(streamsMetadata.stateStoreNames().contains(globalTable)); } } |
### Question:
StreamsMetadataState { public synchronized Collection<StreamsMetadata> getAllMetadata() { return allMetadata; } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName,
final K key,
final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }### Answer:
@Test public void shouldGetAllStreamInstances() throws Exception { final StreamsMetadata one = new StreamsMetadata(hostOne, Utils.mkSet(globalTable, "table-one", "table-two", "merged-table"), Utils.mkSet(topic1P0, topic2P1, topic4P0)); final StreamsMetadata two = new StreamsMetadata(hostTwo, Utils.mkSet(globalTable, "table-two", "table-one", "merged-table"), Utils.mkSet(topic2P0, topic1P1)); final StreamsMetadata three = new StreamsMetadata(hostThree, Utils.mkSet(globalTable, "table-three"), Collections.singleton(topic3P0)); Collection<StreamsMetadata> actual = discovery.getAllMetadata(); assertEquals(3, actual.size()); assertTrue("expected " + actual + " to contain " + one, actual.contains(one)); assertTrue("expected " + actual + " to contain " + two, actual.contains(two)); assertTrue("expected " + actual + " to contain " + three, actual.contains(three)); } |
### Question:
StreamsMetadataState { public synchronized <K> StreamsMetadata getMetadataWithKey(final String storeName, final K key, final Serializer<K> keySerializer) { Objects.requireNonNull(keySerializer, "keySerializer can't be null"); Objects.requireNonNull(storeName, "storeName can't be null"); Objects.requireNonNull(key, "key can't be null"); if (!isInitialized()) { return StreamsMetadata.NOT_AVAILABLE; } if (globalStores.contains(storeName)) { if (thisHost == UNKNOWN_HOST) { return allMetadata.get(0); } return myMetadata; } final SourceTopicsInfo sourceTopicsInfo = getSourceTopicsInfo(storeName); if (sourceTopicsInfo == null) { return null; } return getStreamsMetadataForKey(storeName, key, new DefaultStreamPartitioner<>(keySerializer, clusterMetadata, sourceTopicsInfo.topicWithMostPartitions), sourceTopicsInfo); } StreamsMetadataState(final TopologyBuilder builder, final HostInfo thisHost); synchronized Collection<StreamsMetadata> getAllMetadata(); synchronized Collection<StreamsMetadata> getAllMetadataForStore(final String storeName); synchronized StreamsMetadata getMetadataWithKey(final String storeName,
final K key,
final Serializer<K> keySerializer); synchronized StreamsMetadata getMetadataWithKey(final String storeName,
final K key,
final StreamPartitioner<? super K, ?> partitioner); synchronized void onChange(final Map<HostInfo, Set<TopicPartition>> currentState, final Cluster clusterMetadata); static final HostInfo UNKNOWN_HOST; }### Answer:
@Test public void shouldReturnNullOnGetWithKeyWhenStoreDoesntExist() throws Exception { final StreamsMetadata actual = discovery.getMetadataWithKey("not-a-store", "key", Serdes.String().serializer()); assertNull(actual); }
@Test(expected = NullPointerException.class) public void shouldThrowWhenKeyIsNull() throws Exception { discovery.getMetadataWithKey("table-three", null, Serdes.String().serializer()); }
@Test(expected = NullPointerException.class) public void shouldThrowWhenSerializerIsNull() throws Exception { discovery.getMetadataWithKey("table-three", "key", (Serializer) null); }
@Test(expected = NullPointerException.class) public void shouldThrowIfStoreNameIsNull() throws Exception { discovery.getMetadataWithKey(null, "key", Serdes.String().serializer()); }
@SuppressWarnings("unchecked") @Test(expected = NullPointerException.class) public void shouldThrowIfStreamPartitionerIsNull() throws Exception { discovery.getMetadataWithKey(null, "key", (StreamPartitioner) null); }
@Test public void shouldGetMyMetadataForGlobalStoreWithKey() throws Exception { final StreamsMetadata metadata = discovery.getMetadataWithKey(globalTable, "key", Serdes.String().serializer()); assertEquals(hostOne, metadata.hostInfo()); }
@Test public void shouldGetMyMetadataForGlobalStoreWithKeyAndPartitioner() throws Exception { final StreamsMetadata metadata = discovery.getMetadataWithKey(globalTable, "key", partitioner); assertEquals(hostOne, metadata.hostInfo()); } |
### Question:
SinkNode extends ProcessorNode<K, V> { @Override public void process(final K key, final V value) { final RecordCollector collector = ((RecordCollector.Supplier) context).recordCollector(); final long timestamp = context.timestamp(); if (timestamp < 0) { throw new StreamsException("Invalid (negative) timestamp of " + timestamp + " for output record <" + key + ":" + value + ">."); } try { collector.send(topic, key, value, timestamp, keySerializer, valSerializer, partitioner); } catch (final ClassCastException e) { final String keyClass = key == null ? "unknown because key is null" : key.getClass().getName(); final String valueClass = value == null ? "unknown because value is null" : value.getClass().getName(); throw new StreamsException( String.format("A serializer (key: %s / value: %s) is not compatible to the actual key or value type " + "(key type: %s / value type: %s). Change the default Serdes in StreamConfig or " + "provide correct Serdes via method parameters.", keySerializer.getClass().getName(), valSerializer.getClass().getName(), keyClass, valueClass), e); } } SinkNode(final String name,
final String topic,
final Serializer<K> keySerializer,
final Serializer<V> valSerializer,
final StreamPartitioner<? super K, ? super V> partitioner); @Override void addChild(final ProcessorNode<?, ?> child); @SuppressWarnings("unchecked") @Override void init(final ProcessorContext context); @Override void process(final K key, final V value); @Override String toString(); @Override String toString(final String indent); }### Answer:
@Test @SuppressWarnings("unchecked") public void shouldThrowStreamsExceptionOnInputRecordWithInvalidTimestamp() { final Bytes anyKey = new Bytes("any key".getBytes()); final Bytes anyValue = new Bytes("any value".getBytes()); context.setTime(-1); try { sink.process(anyKey, anyValue); fail("Should have thrown StreamsException"); } catch (final StreamsException ignored) { } }
@Test @SuppressWarnings("unchecked") public void shouldThrowStreamsExceptionOnKeyValueTypeSerializerMismatch() { final String keyOfDifferentTypeThanSerializer = "key with different type"; final String valueOfDifferentTypeThanSerializer = "value with different type"; context.setTime(0); try { sink.process(keyOfDifferentTypeThanSerializer, valueOfDifferentTypeThanSerializer); fail("Should have thrown StreamsException"); } catch (final StreamsException e) { assertThat(e.getCause(), instanceOf(ClassCastException.class)); } }
@Test @SuppressWarnings("unchecked") public void shouldHandleNullKeysWhenThrowingStreamsExceptionOnKeyValueTypeSerializerMismatch() { final String invalidValueToTriggerSerializerMismatch = ""; context.setTime(1); try { sink.process(null, invalidValueToTriggerSerializerMismatch); fail("Should have thrown StreamsException"); } catch (final StreamsException e) { assertThat(e.getCause(), instanceOf(ClassCastException.class)); assertThat(e.getMessage(), containsString("unknown because key is null")); } }
@Test @SuppressWarnings("unchecked") public void shouldHandleNullValuesWhenThrowingStreamsExceptionOnKeyValueTypeSerializerMismatch() { final String invalidKeyToTriggerSerializerMismatch = ""; context.setTime(1); try { sink.process(invalidKeyToTriggerSerializerMismatch, null); fail("Should have thrown StreamsException"); } catch (final StreamsException e) { assertThat(e.getCause(), instanceOf(ClassCastException.class)); assertThat(e.getMessage(), containsString("unknown because value is null")); } } |
### Question:
ProcessorTopology { public List<StateStore> globalStateStores() { return globalStateStores; } ProcessorTopology(final List<ProcessorNode> processorNodes,
final Map<String, SourceNode> sourceByTopics,
final Map<String, SinkNode> sinkByTopics,
final List<StateStore> stateStores,
final Map<String, String> storeToChangelogTopic,
final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }### Answer:
@SuppressWarnings("unchecked") @Test public void shouldDriveGlobalStore() throws Exception { final StateStoreSupplier storeSupplier = Stores.create("my-store") .withStringKeys().withStringValues().inMemory().disableLogging().build(); final String global = "global"; final String topic = "topic"; final TopologyBuilder topologyBuilder = this.builder .addGlobalStore(storeSupplier, global, STRING_DESERIALIZER, STRING_DESERIALIZER, topic, "processor", define(new StatefulProcessor("my-store"))); driver = new ProcessorTopologyTestDriver(config, topologyBuilder); final KeyValueStore<String, String> globalStore = (KeyValueStore<String, String>) topologyBuilder.globalStateStores().get("my-store"); driver.process(topic, "key1", "value1", STRING_SERIALIZER, STRING_SERIALIZER); driver.process(topic, "key2", "value2", STRING_SERIALIZER, STRING_SERIALIZER); assertEquals("value1", globalStore.get("key1")); assertEquals("value2", globalStore.get("key2")); } |
### Question:
ProcessorTopology { @Override public String toString() { return toString(""); } ProcessorTopology(final List<ProcessorNode> processorNodes,
final Map<String, SourceNode> sourceByTopics,
final Map<String, SinkNode> sinkByTopics,
final List<StateStore> stateStores,
final Map<String, String> storeToChangelogTopic,
final List<StateStore> globalStateStores); Set<String> sourceTopics(); SourceNode source(String topic); Set<SourceNode> sources(); Set<String> sinkTopics(); SinkNode sink(String topic); Set<SinkNode> sinks(); List<ProcessorNode> processors(); List<StateStore> stateStores(); Map<String, String> storeToChangelogTopic(); List<StateStore> globalStateStores(); @Override String toString(); String toString(final String indent); }### Answer:
@Test public void shouldCreateStringWithSourceAndTopics() throws Exception { builder.addSource("source", "topic1", "topic2"); final ProcessorTopology topology = builder.build(null); final String result = topology.toString(); assertThat(result, containsString("source:\n\t\ttopics:\t\t[topic1, topic2]\n")); }
@Test public void shouldCreateStringWithMultipleSourcesAndTopics() throws Exception { builder.addSource("source", "topic1", "topic2"); builder.addSource("source2", "t", "t1", "t2"); final ProcessorTopology topology = builder.build(null); final String result = topology.toString(); assertThat(result, containsString("source:\n\t\ttopics:\t\t[topic1, topic2]\n")); assertThat(result, containsString("source2:\n\t\ttopics:\t\t[t, t1, t2]\n")); }
@Test public void shouldCreateStringWithProcessors() throws Exception { builder.addSource("source", "t") .addProcessor("processor", mockProcessorSupplier, "source") .addProcessor("other", mockProcessorSupplier, "source"); final ProcessorTopology topology = builder.build(null); final String result = topology.toString(); assertThat(result, containsString("\t\tchildren:\t[processor, other]")); assertThat(result, containsString("processor:\n")); assertThat(result, containsString("other:\n")); }
@Test public void shouldRecursivelyPrintChildren() throws Exception { builder.addSource("source", "t") .addProcessor("processor", mockProcessorSupplier, "source") .addProcessor("child-one", mockProcessorSupplier, "processor") .addProcessor("child-one-one", mockProcessorSupplier, "child-one") .addProcessor("child-two", mockProcessorSupplier, "processor") .addProcessor("child-two-one", mockProcessorSupplier, "child-two"); final String result = builder.build(null).toString(); assertThat(result, containsString("child-one:\n\t\tchildren:\t[child-one-one]")); assertThat(result, containsString("child-two:\n\t\tchildren:\t[child-two-one]")); } |
### Question:
ConnectorsResource { @POST @Path("/") public Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest) throws Throwable { String name = createRequest.name(); if (name.contains("/")) { throw new BadRequestException("connector name should not contain '/'"); } Map<String, String> configs = createRequest.config(); if (!configs.containsKey(ConnectorConfig.NAME_CONFIG)) configs.put(ConnectorConfig.NAME_CONFIG, name); FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); herder.putConnectorConfig(name, configs, false, cb); Herder.Created<ConnectorInfo> info = completeOrForwardRequest(cb, "/connectors", "POST", createRequest, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); return Response.created(URI.create("/connectors/" + name)).entity(info.result()).build(); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }### Answer:
@Test public void testCreateConnector() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME)); final Capture<Callback<Herder.Created<ConnectorInfo>>> cb = Capture.newInstance(); herder.putConnectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(body.config()), EasyMock.eq(false), EasyMock.capture(cb)); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, CONNECTOR_TASK_NAMES))); PowerMock.replayAll(); connectorsResource.createConnector(FORWARD, body); PowerMock.verifyAll(); } |
### Question:
InternalTopicConfig { public Properties toProperties(final long additionalRetentionMs) { final Properties result = new Properties(); for (Map.Entry<String, String> configEntry : logConfig.entrySet()) { result.put(configEntry.getKey(), configEntry.getValue()); } if (retentionMs != null && isCompactDelete()) { result.put(InternalTopicManager.RETENTION_MS, String.valueOf(retentionMs + additionalRetentionMs)); } if (!logConfig.containsKey(InternalTopicManager.CLEANUP_POLICY_PROP)) { final StringBuilder builder = new StringBuilder(); for (CleanupPolicy cleanupPolicy : cleanupPolicies) { builder.append(cleanupPolicy.name()).append(","); } builder.deleteCharAt(builder.length() - 1); result.put(InternalTopicManager.CLEANUP_POLICY_PROP, builder.toString()); } return result; } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }### Answer:
@Test public void shouldHaveCompactionPropSetIfSupplied() throws Exception { final Properties properties = new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.compact), Collections.<String, String>emptyMap()).toProperties(0); assertEquals("compact", properties.getProperty(InternalTopicManager.CLEANUP_POLICY_PROP)); }
@Test public void shouldUseCleanupPolicyFromConfigIfSupplied() throws Exception { final InternalTopicConfig config = new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.singletonMap("cleanup.policy", "compact")); final Properties properties = config.toProperties(0); assertEquals("compact", properties.getProperty("cleanup.policy")); }
@Test public void shouldHavePropertiesSuppliedByUser() throws Exception { final Map<String, String> configs = new HashMap<>(); configs.put("retention.ms", "1000"); configs.put("retention.bytes", "10000"); final InternalTopicConfig topicConfig = new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), configs); final Properties properties = topicConfig.toProperties(0); assertEquals("1000", properties.getProperty("retention.ms")); assertEquals("10000", properties.getProperty("retention.bytes")); } |
### Question:
InternalTopicConfig { boolean isCompacted() { return cleanupPolicies.contains(CleanupPolicy.compact); } InternalTopicConfig(final String name, final Set<CleanupPolicy> defaultCleanupPolicies, final Map<String, String> logConfig); Properties toProperties(final long additionalRetentionMs); String name(); void setRetentionMs(final long retentionMs); @Override boolean equals(final Object o); @Override int hashCode(); }### Answer:
@Test public void shouldBeCompactedIfCleanupPolicyCompactOrCompactAndDelete() throws Exception { assertTrue(new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.compact), Collections.<String, String>emptyMap()).isCompacted()); assertTrue(new InternalTopicConfig("name", Utils.mkSet(InternalTopicConfig.CleanupPolicy.compact, InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap()).isCompacted()); }
@Test public void shouldNotBeCompactedWhenCleanupPolicyIsDelete() throws Exception { assertFalse(new InternalTopicConfig("name", Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap()).isCompacted()); } |
### Question:
StreamPartitionAssignor implements PartitionAssignor, Configurable { @Override public Subscription subscription(Set<String> topics) { final Set<TaskId> previousActiveTasks = streamThread.prevActiveTasks(); Set<TaskId> standbyTasks = streamThread.cachedTasks(); standbyTasks.removeAll(previousActiveTasks); SubscriptionInfo data = new SubscriptionInfo(streamThread.processId, previousActiveTasks, standbyTasks, this.userEndPoint); if (streamThread.builder.sourceTopicPattern() != null && !streamThread.builder.subscriptionUpdates().getUpdates().equals(topics)) { updateSubscribedTopics(topics); } return new Subscription(new ArrayList<>(topics), data.encode()); } @Override void configure(Map<String, ?> configs); @Override String name(); @Override Subscription subscription(Set<String> topics); @Override Map<String, Assignment> assign(Cluster metadata, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); void close(); final static int NOT_AVAILABLE; }### Answer:
@SuppressWarnings("unchecked") @Test public void testSubscription() throws Exception { builder.addSource("source1", "topic1"); builder.addSource("source2", "topic2"); builder.addProcessor("processor", new MockProcessorSupplier(), "source1", "source2"); final Set<TaskId> prevTasks = Utils.mkSet( new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1)); final Set<TaskId> cachedTasks = Utils.mkSet( new TaskId(0, 1), new TaskId(1, 1), new TaskId(2, 1), new TaskId(0, 2), new TaskId(1, 2), new TaskId(2, 2)); String clientId = "client-id"; UUID processId = UUID.randomUUID(); StreamThread thread = new StreamThread(builder, config, new MockClientSupplier(), "test", clientId, processId, new Metrics(), Time.SYSTEM, new StreamsMetadataState(builder, StreamsMetadataState.UNKNOWN_HOST), 0) { @Override public Set<TaskId> prevActiveTasks() { return prevTasks; } @Override public Set<TaskId> cachedTasks() { return cachedTasks; } }; partitionAssignor.configure(config.getConsumerConfigs(thread, "test", clientId)); PartitionAssignor.Subscription subscription = partitionAssignor.subscription(Utils.mkSet("topic1", "topic2")); Collections.sort(subscription.topics()); assertEquals(Utils.mkList("topic1", "topic2"), subscription.topics()); Set<TaskId> standbyTasks = new HashSet<>(cachedTasks); standbyTasks.removeAll(prevTasks); SubscriptionInfo info = new SubscriptionInfo(processId, prevTasks, standbyTasks, null); assertEquals(info.encode(), subscription.userData()); } |