target (stringlengths 20–113k) | src_fm (stringlengths 11–86.3k) | src_fm_fc (stringlengths 21–86.4k) | src_fm_fc_co (stringlengths 30–86.4k) | src_fm_fc_ms (stringlengths 42–86.8k) | src_fm_fc_ms_ff (stringlengths 43–86.8k) |
---|---|---|---|---|---|
@Test public void dateToJson() throws IOException { GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.DATE, 10000); java.util.Date date = calendar.getTime(); JsonNode converted = parse(converter.fromConnectData(TOPIC, Date.SCHEMA, date)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"int32\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1 }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME); assertTrue(payload.isInt()); assertEquals(10000, payload.intValue()); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
@Test public void testValue() { final byte[] bytes = "Some String".getBytes(); assertEquals(608512271, Crc32C.compute(bytes, 0, bytes.length)); }
|
public static long compute(byte[] bytes, int offset, int size) { Checksum crc = create(); crc.update(bytes, offset, size); return crc.getValue(); }
|
Crc32C { public static long compute(byte[] bytes, int offset, int size) { Checksum crc = create(); crc.update(bytes, offset, size); return crc.getValue(); } }
|
Crc32C { public static long compute(byte[] bytes, int offset, int size) { Checksum crc = create(); crc.update(bytes, offset, size); return crc.getValue(); } }
|
Crc32C { public static long compute(byte[] bytes, int offset, int size) { Checksum crc = create(); crc.update(bytes, offset, size); return crc.getValue(); } static long compute(byte[] bytes, int offset, int size); static long compute(ByteBuffer buffer, int offset, int size); static Checksum create(); }
|
Crc32C { public static long compute(byte[] bytes, int offset, int size) { Checksum crc = create(); crc.update(bytes, offset, size); return crc.getValue(); } static long compute(byte[] bytes, int offset, int size); static long compute(ByteBuffer buffer, int offset, int size); static Checksum create(); }
|
@Test public void testUpdate() { final byte[] bytes = "Any String you want".getBytes(); final int len = bytes.length; Checksum crc1 = Crc32C.create(); Checksum crc2 = Crc32C.create(); Checksum crc3 = Crc32C.create(); crc1.update(bytes, 0, len); for (int i = 0; i < len; i++) crc2.update(bytes[i]); crc3.update(bytes, 0, len / 2); crc3.update(bytes, len / 2, len - len / 2); assertEquals("Crc values should be the same", crc1.getValue(), crc2.getValue()); assertEquals("Crc values should be the same", crc1.getValue(), crc3.getValue()); }
|
@Override public void update(byte[] b, int off, int len) { if (off < 0 || len < 0 || off > b.length - len) throw new ArrayIndexOutOfBoundsException(); int localCrc = crc; while (len > 7) { final int c0 = (b[off + 0] ^ localCrc) & 0xff; final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); final int c4 = b[off + 4] & 0xff; final int c5 = b[off + 5] & 0xff; final int c6 = b[off + 6] & 0xff; final int c7 = b[off + 7] & 0xff; localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); off += 8; len -= 8; } switch (len) { case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; default: } crc = localCrc; }
|
Crc32 implements Checksum { @Override public void update(byte[] b, int off, int len) { if (off < 0 || len < 0 || off > b.length - len) throw new ArrayIndexOutOfBoundsException(); int localCrc = crc; while (len > 7) { final int c0 = (b[off + 0] ^ localCrc) & 0xff; final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); final int c4 = b[off + 4] & 0xff; final int c5 = b[off + 5] & 0xff; final int c6 = b[off + 6] & 0xff; final int c7 = b[off + 7] & 0xff; localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); off += 8; len -= 8; } switch (len) { case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; default: } crc = localCrc; } }
|
Crc32 implements Checksum { @Override public void update(byte[] b, int off, int len) { if (off < 0 || len < 0 || off > b.length - len) throw new ArrayIndexOutOfBoundsException(); int localCrc = crc; while (len > 7) { final int c0 = (b[off + 0] ^ localCrc) & 0xff; final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); final int c4 = b[off + 4] & 0xff; final int c5 = b[off + 5] & 0xff; final int c6 = b[off + 6] & 0xff; final int c7 = b[off + 7] & 0xff; localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); off += 8; len -= 8; } switch (len) { case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; default: } crc = localCrc; } Crc32(); }
|
Crc32 implements Checksum { @Override public void update(byte[] b, int off, int len) { if (off < 0 || len < 0 || off > b.length - len) throw new ArrayIndexOutOfBoundsException(); int localCrc = crc; while (len > 7) { final int c0 = (b[off + 0] ^ localCrc) & 0xff; final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); final int c4 = b[off + 4] & 0xff; final int c5 = b[off + 5] & 0xff; final int c6 = b[off + 6] & 0xff; final int c7 = b[off + 7] & 0xff; localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); off += 8; len -= 8; } switch (len) { case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; default: } crc = localCrc; } Crc32(); static long crc32(byte[] bytes); static long crc32(byte[] bytes, int offset, int size); static long crc32(ByteBuffer buffer, int offset, int size); @Override long getValue(); @Override void reset(); @Override void update(byte[] b, int off, int len); @Override final void update(int b); }
|
Crc32 implements Checksum { @Override public void update(byte[] b, int off, int len) { if (off < 0 || len < 0 || off > b.length - len) throw new ArrayIndexOutOfBoundsException(); int localCrc = crc; while (len > 7) { final int c0 = (b[off + 0] ^ localCrc) & 0xff; final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); final int c4 = b[off + 4] & 0xff; final int c5 = b[off + 5] & 0xff; final int c6 = b[off + 6] & 0xff; final int c7 = b[off + 7] & 0xff; localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); off += 8; len -= 8; } switch (len) { case 7: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 6: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 5: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 4: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 3: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 2: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; case 1: localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; default: } crc = localCrc; } Crc32(); static long crc32(byte[] bytes); static long crc32(byte[] bytes, int offset, int size); static long crc32(ByteBuffer buffer, int offset, int size); @Override long getValue(); @Override void reset(); @Override void update(byte[] b, int off, int len); @Override final void update(int b); }
|
@Test public void testValue() { final byte[] bytes = "Some String".getBytes(); assertEquals(2021503672, Crc32.crc32(bytes)); }
|
public static long crc32(byte[] bytes) { return crc32(bytes, 0, bytes.length); }
|
Crc32 implements Checksum { public static long crc32(byte[] bytes) { return crc32(bytes, 0, bytes.length); } }
|
Crc32 implements Checksum { public static long crc32(byte[] bytes) { return crc32(bytes, 0, bytes.length); } Crc32(); }
|
Crc32 implements Checksum { public static long crc32(byte[] bytes) { return crc32(bytes, 0, bytes.length); } Crc32(); static long crc32(byte[] bytes); static long crc32(byte[] bytes, int offset, int size); static long crc32(ByteBuffer buffer, int offset, int size); @Override long getValue(); @Override void reset(); @Override void update(byte[] b, int off, int len); @Override final void update(int b); }
|
Crc32 implements Checksum { public static long crc32(byte[] bytes) { return crc32(bytes, 0, bytes.length); } Crc32(); static long crc32(byte[] bytes); static long crc32(byte[] bytes, int offset, int size); static long crc32(ByteBuffer buffer, int offset, int size); @Override long getValue(); @Override void reset(); @Override void update(byte[] b, int off, int len); @Override final void update(int b); }
|
@Test public void testReadUnsignedIntLEFromArray() { byte[] array1 = {0x01, 0x02, 0x03, 0x04, 0x05}; assertEquals(0x04030201, ByteUtils.readUnsignedIntLE(array1, 0)); assertEquals(0x05040302, ByteUtils.readUnsignedIntLE(array1, 1)); byte[] array2 = {(byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4, (byte) 0xf5, (byte) 0xf6}; assertEquals(0xf4f3f2f1, ByteUtils.readUnsignedIntLE(array2, 0)); assertEquals(0xf6f5f4f3, ByteUtils.readUnsignedIntLE(array2, 2)); }
|
public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); }
|
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } }
|
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } private ByteUtils(); }
|
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
@Test public void testReadUnsignedIntLEFromInputStream() throws IOException { byte[] array1 = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09}; ByteArrayInputStream is1 = new ByteArrayInputStream(array1); assertEquals(0x04030201, ByteUtils.readUnsignedIntLE(is1)); assertEquals(0x08070605, ByteUtils.readUnsignedIntLE(is1)); byte[] array2 = {(byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4, (byte) 0xf5, (byte) 0xf6, (byte) 0xf7, (byte) 0xf8}; ByteArrayInputStream is2 = new ByteArrayInputStream(array2); assertEquals(0xf4f3f2f1, ByteUtils.readUnsignedIntLE(is2)); assertEquals(0xf8f7f6f5, ByteUtils.readUnsignedIntLE(is2)); }
|
public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); }
|
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } }
|
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } private ByteUtils(); }
|
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
ByteUtils { public static int readUnsignedIntLE(InputStream in) throws IOException { return in.read() | (in.read() << 8) | (in.read() << 16) | (in.read() << 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
@Test public void testReadUnsignedInt() { ByteBuffer buffer = ByteBuffer.allocate(4); long writeValue = 133444; ByteUtils.writeUnsignedInt(buffer, writeValue); buffer.flip(); long readValue = ByteUtils.readUnsignedInt(buffer); assertEquals(writeValue, readValue); }
|
public static long readUnsignedInt(ByteBuffer buffer) { return buffer.getInt() & 0xffffffffL; }
|
ByteUtils { public static long readUnsignedInt(ByteBuffer buffer) { return buffer.getInt() & 0xffffffffL; } }
|
ByteUtils { public static long readUnsignedInt(ByteBuffer buffer) { return buffer.getInt() & 0xffffffffL; } private ByteUtils(); }
|
ByteUtils { public static long readUnsignedInt(ByteBuffer buffer) { return buffer.getInt() & 0xffffffffL; } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
ByteUtils { public static long readUnsignedInt(ByteBuffer buffer) { return buffer.getInt() & 0xffffffffL; } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
@Test public void testWriteUnsignedIntLEToArray() { int value1 = 0x04030201; byte[] array1 = new byte[4]; ByteUtils.writeUnsignedIntLE(array1, 0, value1); assertArrayEquals(new byte[] {0x01, 0x02, 0x03, 0x04}, array1); array1 = new byte[8]; ByteUtils.writeUnsignedIntLE(array1, 2, value1); assertArrayEquals(new byte[] {0, 0, 0x01, 0x02, 0x03, 0x04, 0, 0}, array1); int value2 = 0xf4f3f2f1; byte[] array2 = new byte[4]; ByteUtils.writeUnsignedIntLE(array2, 0, value2); assertArrayEquals(new byte[] {(byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4}, array2); array2 = new byte[8]; ByteUtils.writeUnsignedIntLE(array2, 2, value2); assertArrayEquals(new byte[] {0, 0, (byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4, 0, 0}, array2); }
|
public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); }
|
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } }
|
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } private ByteUtils(); }
|
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
@Test public void testWriteUnsignedIntLEToOutputStream() throws IOException { int value1 = 0x04030201; ByteArrayOutputStream os1 = new ByteArrayOutputStream(); ByteUtils.writeUnsignedIntLE(os1, value1); ByteUtils.writeUnsignedIntLE(os1, value1); assertArrayEquals(new byte[] {0x01, 0x02, 0x03, 0x04, 0x01, 0x02, 0x03, 0x04}, os1.toByteArray()); int value2 = 0xf4f3f2f1; ByteArrayOutputStream os2 = new ByteArrayOutputStream(); ByteUtils.writeUnsignedIntLE(os2, value2); assertArrayEquals(new byte[] {(byte) 0xf1, (byte) 0xf2, (byte) 0xf3, (byte) 0xf4}, os2.toByteArray()); }
|
public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); }
|
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } }
|
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } private ByteUtils(); }
|
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
ByteUtils { public static void writeUnsignedIntLE(OutputStream out, int value) throws IOException { out.write(value); out.write(value >>> 8); out.write(value >>> 16); out.write(value >>> 24); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
@Test(expected = IllegalArgumentException.class) public void testInvalidVarint() { ByteBuffer buf = ByteBuffer.wrap(new byte[] {(byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0x01}); ByteUtils.readVarint(buf); }
|
public static int readVarint(ByteBuffer buffer) { int value = 0; int i = 0; int b; while (((b = buffer.get()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 28) throw illegalVarintException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); }
|
ByteUtils { public static int readVarint(ByteBuffer buffer) { int value = 0; int i = 0; int b; while (((b = buffer.get()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 28) throw illegalVarintException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } }
|
ByteUtils { public static int readVarint(ByteBuffer buffer) { int value = 0; int i = 0; int b; while (((b = buffer.get()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 28) throw illegalVarintException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } private ByteUtils(); }
|
ByteUtils { public static int readVarint(ByteBuffer buffer) { int value = 0; int i = 0; int b; while (((b = buffer.get()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 28) throw illegalVarintException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
ByteUtils { public static int readVarint(ByteBuffer buffer) { int value = 0; int i = 0; int b; while (((b = buffer.get()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 28) throw illegalVarintException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
@Test(expected = IllegalArgumentException.class) public void testInvalidVarlong() { ByteBuffer buf = ByteBuffer.wrap(new byte[] {(byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0xff, (byte) 0x01}); ByteUtils.readVarlong(buf); }
|
public static long readVarlong(DataInput in) throws IOException { long value = 0L; int i = 0; long b; while (((b = in.readByte()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 63) throw illegalVarlongException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); }
|
ByteUtils { public static long readVarlong(DataInput in) throws IOException { long value = 0L; int i = 0; long b; while (((b = in.readByte()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 63) throw illegalVarlongException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } }
|
ByteUtils { public static long readVarlong(DataInput in) throws IOException { long value = 0L; int i = 0; long b; while (((b = in.readByte()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 63) throw illegalVarlongException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } private ByteUtils(); }
|
ByteUtils { public static long readVarlong(DataInput in) throws IOException { long value = 0L; int i = 0; long b; while (((b = in.readByte()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 63) throw illegalVarlongException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
ByteUtils { public static long readVarlong(DataInput in) throws IOException { long value = 0L; int i = 0; long b; while (((b = in.readByte()) & 0x80) != 0) { value |= (b & 0x7f) << i; i += 7; if (i > 63) throw illegalVarlongException(value); } value |= b << i; return (value >>> 1) ^ -(value & 1); } private ByteUtils(); static long readUnsignedInt(ByteBuffer buffer); static long readUnsignedInt(ByteBuffer buffer, int index); static int readUnsignedIntLE(InputStream in); static int readUnsignedIntLE(byte[] buffer, int offset); static void writeUnsignedInt(ByteBuffer buffer, int index, long value); static void writeUnsignedInt(ByteBuffer buffer, long value); static void writeUnsignedIntLE(OutputStream out, int value); static void writeUnsignedIntLE(byte[] buffer, int offset, int value); static int readVarint(ByteBuffer buffer); static int readVarint(DataInput in); static long readVarlong(DataInput in); static long readVarlong(ByteBuffer buffer); static void writeVarint(int value, DataOutput out); static void writeVarint(int value, ByteBuffer buffer); static void writeVarlong(long value, DataOutput out); static void writeVarlong(long value, ByteBuffer buffer); static int sizeOfVarint(int value); static int sizeOfVarlong(long value); }
|
@Test public void timeToJson() throws IOException { GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.MILLISECOND, 14400000); java.util.Date date = calendar.getTime(); JsonNode converted = parse(converter.fromConnectData(TOPIC, Time.SCHEMA, date)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"int32\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1 }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME); assertTrue(payload.isInt()); assertEquals(14400000, payload.longValue()); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
@Test public void intToConnect() { assertEquals(new SchemaAndValue(Schema.INT32_SCHEMA, 12), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"int32\" }, \"payload\": 12 }".getBytes())); }
|
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
|
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
|
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
|
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
@Test public void testGetHost() { assertEquals("127.0.0.1", getHost("127.0.0.1:8000")); assertEquals("mydomain.com", getHost("PLAINTEXT://mydomain.com:8080")); assertEquals("MyDomain.com", getHost("PLAINTEXT://MyDomain.com:8080")); assertEquals("My_Domain.com", getHost("PLAINTEXT://My_Domain.com:8080")); assertEquals("::1", getHost("[::1]:1234")); assertEquals("2001:db8:85a3:8d3:1319:8a2e:370:7348", getHost("PLAINTEXT://[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678")); assertEquals("2001:DB8:85A3:8D3:1319:8A2E:370:7348", getHost("PLAINTEXT://[2001:DB8:85A3:8D3:1319:8A2E:370:7348]:5678")); assertEquals("fe80::b1da:69ca:57f7:63d8%3", getHost("PLAINTEXT://[fe80::b1da:69ca:57f7:63d8%3]:5678")); }
|
public static String getHost(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? matcher.group(1) : null; }
|
Utils { public static String getHost(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? matcher.group(1) : null; } }
|
Utils { public static String getHost(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? matcher.group(1) : null; } }
|
Utils { public static String getHost(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? matcher.group(1) : null; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static String getHost(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? matcher.group(1) : null; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
|
@Test public void testGetPort() { assertEquals(8000, getPort("127.0.0.1:8000").intValue()); assertEquals(8080, getPort("mydomain.com:8080").intValue()); assertEquals(8080, getPort("MyDomain.com:8080").intValue()); assertEquals(1234, getPort("[::1]:1234").intValue()); assertEquals(5678, getPort("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678").intValue()); assertEquals(5678, getPort("[2001:DB8:85A3:8D3:1319:8A2E:370:7348]:5678").intValue()); assertEquals(5678, getPort("[fe80::b1da:69ca:57f7:63d8%3]:5678").intValue()); }
|
public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; }
|
Utils { public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; } }
|
Utils { public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; } }
|
Utils { public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static Integer getPort(String address) { Matcher matcher = HOST_PORT_PATTERN.matcher(address); return matcher.matches() ? Integer.parseInt(matcher.group(2)) : null; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
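A minimal usage sketch for getPort. It assumes the class under test is org.apache.kafka.common.utils.Utils from the Kafka clients module (the record does not name the package), and the host names are illustrative:

import org.apache.kafka.common.utils.Utils;

public class GetPortExample {
    public static void main(String[] args) {
        // Host names, IPv4 literals and bracketed IPv6 literals all match HOST_PORT_PATTERN.
        System.out.println(Utils.getPort("broker1.example.com:9092")); // 9092
        System.out.println(Utils.getPort("[::1]:9092"));               // 9092
        // An address with no port does not match, so the method returns null rather than throwing.
        System.out.println(Utils.getPort("broker1.example.com"));      // null
    }
}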
|
@Test public void testFormatAddress() { assertEquals("127.0.0.1:8000", formatAddress("127.0.0.1", 8000)); assertEquals("mydomain.com:8080", formatAddress("mydomain.com", 8080)); assertEquals("[::1]:1234", formatAddress("::1", 1234)); assertEquals("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:5678", formatAddress("2001:db8:85a3:8d3:1319:8a2e:370:7348", 5678)); }
|
public static String formatAddress(String host, Integer port) { return host.contains(":") ? "[" + host + "]:" + port : host + ":" + port; }
|
Utils { public static String formatAddress(String host, Integer port) { return host.contains(":") ? "[" + host + "]:" + port : host + ":" + port; } }
|
Utils { public static String formatAddress(String host, Integer port) { return host.contains(":") ? "[" + host + "]:" + port : host + ":" + port; } }
|
Utils { public static String formatAddress(String host, Integer port) { return host.contains(":") ? "[" + host + "]:" + port : host + ":" + port; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static String formatAddress(String host, Integer port) { return host.contains(":") ? "[" + host + "]:" + port : host + ":" + port; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
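A short sketch of how formatAddress pairs with getPort, again assuming org.apache.kafka.common.utils.Utils (an assumption, since the record does not give the package):

import org.apache.kafka.common.utils.Utils;

public class FormatAddressExample {
    public static void main(String[] args) {
        // Plain host names and IPv4 addresses come out as host:port.
        System.out.println(Utils.formatAddress("127.0.0.1", 8000));  // 127.0.0.1:8000
        // Any host containing ':' is treated as IPv6 and wrapped in brackets.
        String v6 = Utils.formatAddress("::1", 1234);
        System.out.println(v6);                                      // [::1]:1234
        // The bracketed form round-trips through getPort.
        System.out.println(Utils.getPort(v6));                       // 1234
    }
}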
|
@Test public void testJoin() { assertEquals("", Utils.join(Collections.emptyList(), ",")); assertEquals("1", Utils.join(Arrays.asList("1"), ",")); assertEquals("1,2,3", Utils.join(Arrays.asList(1, 2, 3), ",")); }
|
public static <T> String join(T[] strs, String separator) { return join(Arrays.asList(strs), separator); }
|
Utils { public static <T> String join(T[] strs, String separator) { return join(Arrays.asList(strs), separator); } }
|
Utils { public static <T> String join(T[] strs, String separator) { return join(Arrays.asList(strs), separator); } }
|
Utils { public static <T> String join(T[] strs, String separator) { return join(Arrays.asList(strs), separator); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static <T> String join(T[] strs, String separator) { return join(Arrays.asList(strs), separator); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
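A brief sketch of the two join overloads, assuming org.apache.kafka.common.utils.Utils; the array overload in the focal method simply wraps the array and delegates to the collection overload:

import java.util.Arrays;
import org.apache.kafka.common.utils.Utils;

public class JoinExample {
    public static void main(String[] args) {
        // Array overload: wraps the array with Arrays.asList and delegates.
        System.out.println(Utils.join(new String[] {"a", "b", "c"}, ", ")); // a, b, c
        // Collection overload: elements are rendered via toString(), so non-String types work,
        // which is what the test's join(Arrays.asList(1, 2, 3), ",") assertion relies on.
        System.out.println(Utils.join(Arrays.asList(1, 2, 3), "-"));        // 1-2-3
    }
}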
|
@Test public void testAbs() { assertEquals(0, Utils.abs(Integer.MIN_VALUE)); assertEquals(10, Utils.abs(-10)); assertEquals(10, Utils.abs(10)); assertEquals(0, Utils.abs(0)); assertEquals(1, Utils.abs(-1)); }
|
public static int abs(int n) { return (n == Integer.MIN_VALUE) ? 0 : Math.abs(n); }
|
Utils { public static int abs(int n) { return (n == Integer.MIN_VALUE) ? 0 : Math.abs(n); } }
|
Utils { public static int abs(int n) { return (n == Integer.MIN_VALUE) ? 0 : Math.abs(n); } }
|
Utils { public static int abs(int n) { return (n == Integer.MIN_VALUE) ? 0 : Math.abs(n); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static int abs(int n) { return (n == Integer.MIN_VALUE) ? 0 : Math.abs(n); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
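The Integer.MIN_VALUE special case exists because Math.abs overflows for that one input; a small sketch (Utils assumed to be org.apache.kafka.common.utils.Utils) makes the contrast visible:

public class AbsExample {
    public static void main(String[] args) {
        // Math.abs(Integer.MIN_VALUE) overflows and returns Integer.MIN_VALUE itself.
        System.out.println(Math.abs(Integer.MIN_VALUE));                                 // -2147483648
        // The focal method maps that single value to 0, which the first test assertion pins down.
        System.out.println(org.apache.kafka.common.utils.Utils.abs(Integer.MIN_VALUE));  // 0
    }
}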
|
@Test public void toArray() { byte[] input = {0, 1, 2, 3, 4}; ByteBuffer buffer = ByteBuffer.wrap(input); assertArrayEquals(input, Utils.toArray(buffer)); assertEquals(0, buffer.position()); assertArrayEquals(new byte[] {1, 2}, Utils.toArray(buffer, 1, 2)); assertEquals(0, buffer.position()); buffer.position(2); assertArrayEquals(new byte[] {2, 3, 4}, Utils.toArray(buffer)); assertEquals(2, buffer.position()); }
|
public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); }
|
Utils { public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); } }
|
Utils { public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); } }
|
Utils { public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
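A sketch of the position semantics the test asserts: toArray copies from the buffer's current position to its limit and leaves the position untouched (Utils assumed to be org.apache.kafka.common.utils.Utils):

import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.kafka.common.utils.Utils;

public class ToArrayExample {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.wrap(new byte[] {10, 20, 30, 40});
        buffer.position(1);
        // Copies the remaining bytes starting at the current position ...
        System.out.println(Arrays.toString(Utils.toArray(buffer))); // [20, 30, 40]
        // ... without advancing the buffer.
        System.out.println(buffer.position());                      // 1
    }
}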
|
@Test public void toArrayDirectByteBuffer() { byte[] input = {0, 1, 2, 3, 4}; ByteBuffer buffer = ByteBuffer.allocateDirect(5); buffer.put(input); buffer.rewind(); assertArrayEquals(input, Utils.toArray(buffer)); assertEquals(0, buffer.position()); assertArrayEquals(new byte[] {1, 2}, Utils.toArray(buffer, 1, 2)); assertEquals(0, buffer.position()); buffer.position(2); assertArrayEquals(new byte[] {2, 3, 4}, Utils.toArray(buffer)); assertEquals(2, buffer.position()); }
|
public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); }
|
Utils { public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); } }
|
Utils { public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); } }
|
Utils { public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static byte[] toArray(ByteBuffer buffer) { return toArray(buffer, 0, buffer.remaining()); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
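The direct-buffer variant of the test exercises the code path where no backing array is available; this JDK-only check shows why the two cases differ:

import java.nio.ByteBuffer;

public class DirectBufferCheck {
    public static void main(String[] args) {
        // Heap buffers expose a backing array that can be copied with System.arraycopy ...
        System.out.println(ByteBuffer.wrap(new byte[4]).hasArray());   // true
        // ... direct buffers do not, so bytes must be read through the buffer API instead.
        System.out.println(ByteBuffer.allocateDirect(4).hasArray());   // false
    }
}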
|
@Test public void testReadBytes() { byte[] myvar = "Any String you want".getBytes(); ByteBuffer buffer = ByteBuffer.allocate(myvar.length); buffer.put(myvar); buffer.rewind(); this.subTest(buffer); buffer = ByteBuffer.wrap(myvar).asReadOnlyBuffer(); this.subTest(buffer); }
|
public static byte[] readBytes(ByteBuffer buffer, int offset, int length) { byte[] dest = new byte[length]; if (buffer.hasArray()) { System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length); } else { buffer.mark(); buffer.position(offset); buffer.get(dest, 0, length); buffer.reset(); } return dest; }
|
Utils { public static byte[] readBytes(ByteBuffer buffer, int offset, int length) { byte[] dest = new byte[length]; if (buffer.hasArray()) { System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length); } else { buffer.mark(); buffer.position(offset); buffer.get(dest, 0, length); buffer.reset(); } return dest; } }
|
Utils { public static byte[] readBytes(ByteBuffer buffer, int offset, int length) { byte[] dest = new byte[length]; if (buffer.hasArray()) { System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length); } else { buffer.mark(); buffer.position(offset); buffer.get(dest, 0, length); buffer.reset(); } return dest; } }
|
Utils { public static byte[] readBytes(ByteBuffer buffer, int offset, int length) { byte[] dest = new byte[length]; if (buffer.hasArray()) { System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length); } else { buffer.mark(); buffer.position(offset); buffer.get(dest, 0, length); buffer.reset(); } return dest; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static byte[] readBytes(ByteBuffer buffer, int offset, int length) { byte[] dest = new byte[length]; if (buffer.hasArray()) { System.arraycopy(buffer.array(), buffer.arrayOffset() + offset, dest, 0, length); } else { buffer.mark(); buffer.position(offset); buffer.get(dest, 0, length); buffer.reset(); } return dest; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
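readBytes has a fast path (System.arraycopy over the backing array) and a mark/position/reset fallback for buffers without an accessible array, which is why the test repeats its checks against an asReadOnlyBuffer() view. A sketch, assuming org.apache.kafka.common.utils.Utils:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.utils.Utils;

public class ReadBytesExample {
    public static void main(String[] args) {
        byte[] bytes = "hello world".getBytes(StandardCharsets.UTF_8);
        // A read-only view reports hasArray() == false, forcing the mark/position/reset branch.
        ByteBuffer readOnly = ByteBuffer.wrap(bytes).asReadOnlyBuffer();
        byte[] copy = Utils.readBytes(readOnly, 6, 5);
        System.out.println(new String(copy, StandardCharsets.UTF_8)); // world
        // reset() restores the original position, so the buffer is left where it started.
        System.out.println(readOnly.position());                      // 0
    }
}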
|
@Test public void testMin() { assertEquals(1, Utils.min(1)); assertEquals(1, Utils.min(1, 2, 3)); assertEquals(1, Utils.min(2, 1, 3)); assertEquals(1, Utils.min(2, 3, 1)); }
|
public static long min(long first, long ... rest) { long min = first; for (long r : rest) { if (r < min) min = r; } return min; }
|
Utils { public static long min(long first, long ... rest) { long min = first; for (long r : rest) { if (r < min) min = r; } return min; } }
|
Utils { public static long min(long first, long ... rest) { long min = first; for (long r : rest) { if (r < min) min = r; } return min; } }
|
Utils { public static long min(long first, long ... rest) { long min = first; for (long r : rest) { if (r < min) min = r; } return min; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static long min(long first, long ... rest) { long min = first; for (long r : rest) { if (r < min) min = r; } return min; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
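A minimal sketch of the varargs min (Utils assumed to be org.apache.kafka.common.utils.Utils); the first argument seeds the minimum, so a single-argument call simply returns it, which the test's Utils.min(1) assertion relies on:

public class MinExample {
    public static void main(String[] args) {
        System.out.println(org.apache.kafka.common.utils.Utils.min(42L));        // 42
        // The remaining varargs are scanned linearly for anything smaller.
        System.out.println(org.apache.kafka.common.utils.Utils.min(7L, 3L, 9L)); // 3
    }
}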
|
@Test public void timestampToJson() throws IOException { GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.MILLISECOND, 2000000000); calendar.add(Calendar.MILLISECOND, 2000000000); java.util.Date date = calendar.getTime(); JsonNode converted = parse(converter.fromConnectData(TOPIC, Timestamp.SCHEMA, date)); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"int64\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1 }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); JsonNode payload = converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME); assertTrue(payload.isLong()); assertEquals(4000000000L, payload.longValue()); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
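A hedged end-to-end sketch of the conversion this test performs. It assumes the standard Connect classes (org.apache.kafka.connect.json.JsonConverter, org.apache.kafka.connect.data.Timestamp) and the schemas.enable converter setting; the exact field order of the emitted JSON may differ from the comment:

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.Date;
import org.apache.kafka.connect.data.Timestamp;
import org.apache.kafka.connect.json.JsonConverter;

public class TimestampConversionExample {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // With schemas enabled, fromConnectData wraps the value in the schema/payload envelope.
        converter.configure(Collections.singletonMap("schemas.enable", "true"), false);
        byte[] json = converter.fromConnectData("my-topic", Timestamp.SCHEMA, new Date(4000000000L));
        System.out.println(new String(json, StandardCharsets.UTF_8));
        // e.g. {"schema":{"type":"int64","optional":false,"name":"org.apache.kafka.connect.data.Timestamp","version":1},"payload":4000000000}
    }
}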
|
@Test public void testCloseAll() { TestCloseable[] closeablesWithoutException = TestCloseable.createCloseables(false, false, false); try { Utils.closeAll(closeablesWithoutException); TestCloseable.checkClosed(closeablesWithoutException); } catch (IOException e) { fail("Unexpected exception: " + e); } TestCloseable[] closeablesWithException = TestCloseable.createCloseables(true, true, true); try { Utils.closeAll(closeablesWithException); fail("Expected exception not thrown"); } catch (IOException e) { TestCloseable.checkClosed(closeablesWithException); TestCloseable.checkException(e, closeablesWithException); } TestCloseable[] singleExceptionCloseables = TestCloseable.createCloseables(false, true, false); try { Utils.closeAll(singleExceptionCloseables); fail("Expected exception not thrown"); } catch (IOException e) { TestCloseable.checkClosed(singleExceptionCloseables); TestCloseable.checkException(e, singleExceptionCloseables[1]); } TestCloseable[] mixedCloseables = TestCloseable.createCloseables(false, true, false, true, true); try { Utils.closeAll(mixedCloseables); fail("Expected exception not thrown"); } catch (IOException e) { TestCloseable.checkClosed(mixedCloseables); TestCloseable.checkException(e, mixedCloseables[1], mixedCloseables[3], mixedCloseables[4]); } }
|
public static void closeAll(Closeable... closeables) throws IOException { IOException exception = null; for (Closeable closeable : closeables) { try { closeable.close(); } catch (IOException e) { if (exception != null) exception.addSuppressed(e); else exception = e; } } if (exception != null) throw exception; }
|
Utils { public static void closeAll(Closeable... closeables) throws IOException { IOException exception = null; for (Closeable closeable : closeables) { try { closeable.close(); } catch (IOException e) { if (exception != null) exception.addSuppressed(e); else exception = e; } } if (exception != null) throw exception; } }
|
Utils { public static void closeAll(Closeable... closeables) throws IOException { IOException exception = null; for (Closeable closeable : closeables) { try { closeable.close(); } catch (IOException e) { if (exception != null) exception.addSuppressed(e); else exception = e; } } if (exception != null) throw exception; } }
|
Utils { public static void closeAll(Closeable... closeables) throws IOException { IOException exception = null; for (Closeable closeable : closeables) { try { closeable.close(); } catch (IOException e) { if (exception != null) exception.addSuppressed(e); else exception = e; } } if (exception != null) throw exception; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static void closeAll(Closeable... closeables) throws IOException { IOException exception = null; for (Closeable closeable : closeables) { try { closeable.close(); } catch (IOException e) { if (exception != null) exception.addSuppressed(e); else exception = e; } } if (exception != null) throw exception; } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
|
@Test public void testReadFullyOrFailWithRealFile() throws IOException { try (FileChannel channel = FileChannel.open(TestUtils.tempFile().toPath(), StandardOpenOption.READ, StandardOpenOption.WRITE)) { String msg = "hello, world"; channel.write(ByteBuffer.wrap(msg.getBytes()), 0); channel.force(true); assertEquals("Message should be written to the file channel", channel.size(), msg.length()); ByteBuffer perfectBuffer = ByteBuffer.allocate(msg.length()); ByteBuffer smallBuffer = ByteBuffer.allocate(5); ByteBuffer largeBuffer = ByteBuffer.allocate(msg.length() + 1); Utils.readFullyOrFail(channel, perfectBuffer, 0, "perfect"); assertFalse("Buffer should be filled up", perfectBuffer.hasRemaining()); assertEquals("Buffer should be populated correctly", msg, new String(perfectBuffer.array())); Utils.readFullyOrFail(channel, smallBuffer, 0, "small"); assertFalse("Buffer should be filled", smallBuffer.hasRemaining()); assertEquals("Buffer should be populated correctly", "hello", new String(smallBuffer.array())); smallBuffer.clear(); Utils.readFullyOrFail(channel, smallBuffer, 7, "small"); assertFalse("Buffer should be filled", smallBuffer.hasRemaining()); assertEquals("Buffer should be populated correctly", "world", new String(smallBuffer.array())); try { Utils.readFullyOrFail(channel, largeBuffer, 0, "large"); fail("Expected EOFException to be raised"); } catch (EOFException e) { } } }
|
public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } }
|
Utils { public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } } }
|
Utils { public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } } }
|
Utils { public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
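A usage sketch of readFullyOrFail, separate from the dataset row above: the method either fills the destination buffer completely starting at the given position or throws EOFException, which makes it a fit for fixed-size headers. The file path, header size, and import below are illustrative assumptions.

import java.io.EOFException;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import org.apache.kafka.common.utils.Utils; // assumed package

public class ReadFullyOrFailExample {
    public static void main(String[] args) throws IOException {
        // "segment.log" is a placeholder path for this sketch.
        try (FileChannel channel = FileChannel.open(Paths.get("segment.log"), StandardOpenOption.READ)) {
            ByteBuffer header = ByteBuffer.allocate(12); // hypothetical fixed-size header
            try {
                Utils.readFullyOrFail(channel, header, 0L, "log header");
                header.flip();
                // header now holds exactly 12 bytes read from position 0
            } catch (EOFException e) {
                // Thrown when the file is shorter than the requested 12 bytes.
                System.err.println("truncated file: " + e.getMessage());
            }
        }
    }
}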
|
@Test public void testReadFullyOrFailWithPartialFileChannelReads() throws IOException { FileChannel channelMock = EasyMock.createMock(FileChannel.class); final int bufferSize = 100; ByteBuffer buffer = ByteBuffer.allocate(bufferSize); StringBuilder expectedBufferContent = new StringBuilder(); fileChannelMockExpectReadWithRandomBytes(channelMock, expectedBufferContent, bufferSize); EasyMock.replay(channelMock); Utils.readFullyOrFail(channelMock, buffer, 0L, "test"); assertEquals("The buffer should be populated correctly", expectedBufferContent.toString(), new String(buffer.array())); assertFalse("The buffer should be filled", buffer.hasRemaining()); EasyMock.verify(channelMock); }
|
public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } }
|
Utils { public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } } }
|
Utils { public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } } }
|
Utils { public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position, String description) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } int expectedReadBytes = destinationBuffer.remaining(); readFully(channel, destinationBuffer, position); if (destinationBuffer.hasRemaining()) { throw new EOFException(String.format("Failed to read `%s` from file channel `%s`. Expected to read %d bytes, " + "but reached end of file after reading %d bytes. Started read from position %d.", description, channel, expectedReadBytes, expectedReadBytes - destinationBuffer.remaining(), position)); } } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
|
@Test public void testReadFullyWithPartialFileChannelReads() throws IOException { FileChannel channelMock = EasyMock.createMock(FileChannel.class); final int bufferSize = 100; StringBuilder expectedBufferContent = new StringBuilder(); fileChannelMockExpectReadWithRandomBytes(channelMock, expectedBufferContent, bufferSize); EasyMock.replay(channelMock); ByteBuffer buffer = ByteBuffer.allocate(bufferSize); Utils.readFully(channelMock, buffer, 0L); assertEquals("The buffer should be populated correctly.", expectedBufferContent.toString(), new String(buffer.array())); assertFalse("The buffer should be filled", buffer.hasRemaining()); EasyMock.verify(channelMock); }
|
public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); }
|
Utils { public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); } }
|
Utils { public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); } }
|
Utils { public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
|
@Test public void testReadFullyIfEofIsReached() throws IOException { final FileChannel channelMock = EasyMock.createMock(FileChannel.class); final int bufferSize = 100; final String fileChannelContent = "abcdefghkl"; ByteBuffer buffer = ByteBuffer.allocate(bufferSize); EasyMock.expect(channelMock.size()).andReturn((long) fileChannelContent.length()); EasyMock.expect(channelMock.read(EasyMock.anyObject(ByteBuffer.class), EasyMock.anyInt())).andAnswer(new IAnswer<Integer>() { @Override public Integer answer() throws Throwable { ByteBuffer buffer = (ByteBuffer) EasyMock.getCurrentArguments()[0]; buffer.put(fileChannelContent.getBytes()); return -1; } }); EasyMock.replay(channelMock); Utils.readFully(channelMock, buffer, 0L); assertEquals("abcdefghkl", new String(buffer.array(), 0, buffer.position())); assertEquals(buffer.position(), channelMock.size()); assertTrue(buffer.hasRemaining()); EasyMock.verify(channelMock); }
|
public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); }
|
Utils { public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); } }
|
Utils { public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); } }
|
Utils { public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position) throws IOException { if (position < 0) { throw new IllegalArgumentException("The file channel position cannot be negative, but it is " + position); } long currentPosition = position; int bytesRead; do { bytesRead = channel.read(destinationBuffer, currentPosition); currentPosition += bytesRead; } while (bytesRead != -1 && destinationBuffer.hasRemaining()); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
|
@Test(timeout = 120000) public void testRecursiveDelete() throws IOException { Utils.delete(null); File tempFile = TestUtils.tempFile(); Utils.delete(tempFile); assertFalse(Files.exists(tempFile.toPath())); File tempDir = TestUtils.tempDirectory(); File tempDir2 = TestUtils.tempDirectory(tempDir.toPath(), "a"); TestUtils.tempDirectory(tempDir.toPath(), "b"); TestUtils.tempDirectory(tempDir2.toPath(), "c"); Utils.delete(tempDir); assertFalse(Files.exists(tempDir.toPath())); assertFalse(Files.exists(tempDir2.toPath())); Utils.delete(tempDir); assertFalse(Files.exists(tempDir.toPath())); }
|
public static void delete(final File file) throws IOException { if (file == null) return; Files.walkFileTree(file.toPath(), new SimpleFileVisitor<Path>() { @Override public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException { if (exc instanceof NoSuchFileException && path.toFile().equals(file)) return FileVisitResult.TERMINATE; throw exc; } @Override public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } @Override public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } }); }
|
Utils { public static void delete(final File file) throws IOException { if (file == null) return; Files.walkFileTree(file.toPath(), new SimpleFileVisitor<Path>() { @Override public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException { if (exc instanceof NoSuchFileException && path.toFile().equals(file)) return FileVisitResult.TERMINATE; throw exc; } @Override public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } @Override public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } }); } }
|
Utils { public static void delete(final File file) throws IOException { if (file == null) return; Files.walkFileTree(file.toPath(), new SimpleFileVisitor<Path>() { @Override public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException { if (exc instanceof NoSuchFileException && path.toFile().equals(file)) return FileVisitResult.TERMINATE; throw exc; } @Override public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } @Override public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } }); } }
|
Utils { public static void delete(final File file) throws IOException { if (file == null) return; Files.walkFileTree(file.toPath(), new SimpleFileVisitor<Path>() { @Override public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException { if (exc instanceof NoSuchFileException && path.toFile().equals(file)) return FileVisitResult.TERMINATE; throw exc; } @Override public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } @Override public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } }); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); }
|
Utils { public static void delete(final File file) throws IOException { if (file == null) return; Files.walkFileTree(file.toPath(), new SimpleFileVisitor<Path>() { @Override public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException { if (exc instanceof NoSuchFileException && path.toFile().equals(file)) return FileVisitResult.TERMINATE; throw exc; } @Override public FileVisitResult visitFile(Path path, BasicFileAttributes attrs) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } @Override public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOException { Files.delete(path); return FileVisitResult.CONTINUE; } }); } static List<T> sorted(Collection<T> collection); static String utf8(byte[] bytes); static String utf8(ByteBuffer buffer, int length); static String utf8(ByteBuffer buffer, int offset, int length); static byte[] utf8(String string); static int abs(int n); static long min(long first, long ... rest); static long max(long first, long ... rest); static short min(short first, short second); static int utf8Length(CharSequence s); static byte[] toArray(ByteBuffer buffer); static byte[] toArray(ByteBuffer buffer, int size); static byte[] toNullableArray(ByteBuffer buffer); static ByteBuffer wrapNullable(byte[] array); static byte[] toArray(ByteBuffer buffer, int offset, int size); static T notNull(T t); static void sleep(long ms); static T newInstance(Class<T> c); static T newInstance(String klass, Class<T> base); static int murmur2(final byte[] data); static String getHost(String address); static Integer getPort(String address); static String formatAddress(String host, Integer port); static String join(T[] strs, String separator); static String join(Collection<T> list, String separator); static String mkString(Map<K, V> map, String begin, String end,
String keyValueSeparator, String elementSeparator); static Properties loadProps(String filename); static Map<String, String> propsToStringMap(Properties props); static String stackTrace(Throwable e); static Thread newThread(String name, Runnable runnable, boolean daemon); static Thread daemonThread(String name, Runnable runnable); static void croak(String message); static byte[] readBytes(ByteBuffer buffer, int offset, int length); static byte[] readBytes(ByteBuffer buffer); static String readFileAsString(String path, Charset charset); static String readFileAsString(String path); static ByteBuffer ensureCapacity(ByteBuffer existingBuffer, int newLength); @SafeVarargs static Set<T> mkSet(T... elems); @SafeVarargs static List<T> mkList(T... elems); static void delete(final File file); static List<T> safe(List<T> other); static ClassLoader getKafkaClassLoader(); static ClassLoader getContextOrKafkaClassLoader(); static void atomicMoveWithFallback(Path source, Path target); static void closeAll(Closeable... closeables); static void closeQuietly(AutoCloseable closeable, String name); static int toPositive(int number); static int longHashcode(long value); static ByteBuffer sizeDelimited(ByteBuffer buffer, int start); static void readFullyOrFail(FileChannel channel, ByteBuffer destinationBuffer, long position,
String description); static void readFully(FileChannel channel, ByteBuffer destinationBuffer, long position); static final void readFully(InputStream inputStream, ByteBuffer destinationBuffer); static void writeFully(FileChannel channel, ByteBuffer sourceBuffer); static void writeTo(DataOutput out, ByteBuffer buffer, int length); static List<T> toList(Iterator<T> iterator); static final String NL; }
|
@Test public void testEchoHello() throws Exception { assumeTrue(!OperatingSystem.IS_WINDOWS); String output = Shell.execCommand("echo", "hello"); assertEquals("hello\n", output); }
|
public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); }
|
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } }
|
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } Shell(long timeout); }
|
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } Shell(long timeout); int exitCode(); Process process(); static String execCommand(String ... cmd); static String execCommand(String[] cmd, long timeout); }
|
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } Shell(long timeout); int exitCode(); Process process(); static String execCommand(String ... cmd); static String execCommand(String[] cmd, long timeout); }
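Shell.execCommand runs an external process and returns its stdout as a String; the varargs overload shown here delegates to the timeout variant with -1 (no timeout). A minimal sketch, assuming the class is org.apache.kafka.common.utils.Shell and a Unix-like host; the ls command and /tmp path are illustrative assumptions.

import java.io.IOException;
import org.apache.kafka.common.utils.Shell; // assumed package

public class ShellExample {
    public static void main(String[] args) throws IOException {
        // Equivalent to the test above: capture stdout of `echo hello`.
        String output = Shell.execCommand("echo", "hello");
        System.out.print(output); // prints "hello" followed by a newline

        // The array overload adds a timeout in milliseconds.
        String listing = Shell.execCommand(new String[] {"ls", "-1", "/tmp"}, 5000L);
        System.out.print(listing);
    }
}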
|
@Test public void testHeadDevZero() throws Exception { assumeTrue(!OperatingSystem.IS_WINDOWS); final int length = 100000; String output = Shell.execCommand("head", "-c", Integer.toString(length), "/dev/zero"); assertEquals(length, output.length()); }
|
public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); }
|
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } }
|
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } Shell(long timeout); }
|
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } Shell(long timeout); int exitCode(); Process process(); static String execCommand(String ... cmd); static String execCommand(String[] cmd, long timeout); }
|
Shell { public static String execCommand(String ... cmd) throws IOException { return execCommand(cmd, -1); } Shell(long timeout); int exitCode(); Process process(); static String execCommand(String ... cmd); static String execCommand(String[] cmd, long timeout); }
|
@Test public void testChecksum() { assertEquals(record.checksum(), record.computeChecksum()); byte attributes = LegacyRecord.computeAttributes(magic, this.compression, TimestampType.CREATE_TIME); assertEquals(record.checksum(), LegacyRecord.computeChecksum( magic, attributes, this.timestamp, this.key == null ? null : this.key.array(), this.value == null ? null : this.value.array() )); assertTrue(record.isValid()); for (int i = LegacyRecord.CRC_OFFSET + LegacyRecord.CRC_LENGTH; i < record.sizeInBytes(); i++) { LegacyRecord copy = copyOf(record); copy.buffer().put(i, (byte) 69); assertFalse(copy.isValid()); try { copy.ensureValid(); fail("ensureValid() should throw InvalidRecordException for a corrupted record"); } catch (InvalidRecordException e) { /* expected */ } } }
|
public long checksum() { return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET); }
|
LegacyRecord { public long checksum() { return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET); } }
|
LegacyRecord { public long checksum() { return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET); } LegacyRecord(ByteBuffer buffer); LegacyRecord(ByteBuffer buffer, Long wrapperRecordTimestamp, TimestampType wrapperRecordTimestampType); }
|
LegacyRecord { public long checksum() { return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET); } LegacyRecord(ByteBuffer buffer); LegacyRecord(ByteBuffer buffer, Long wrapperRecordTimestamp, TimestampType wrapperRecordTimestampType); long computeChecksum(); long checksum(); boolean isValid(); Long wrapperRecordTimestamp(); TimestampType wrapperRecordTimestampType(); void ensureValid(); int sizeInBytes(); int keySize(); boolean hasKey(); int valueSize(); boolean hasNullValue(); byte magic(); byte attributes(); long timestamp(); TimestampType timestampType(); CompressionType compressionType(); ByteBuffer value(); ByteBuffer key(); ByteBuffer buffer(); String toString(); boolean equals(Object other); int hashCode(); static LegacyRecord create(byte magic,
long timestamp,
byte[] key,
byte[] value,
CompressionType compressionType,
TimestampType timestampType); static LegacyRecord create(byte magic, long timestamp, byte[] key, byte[] value); static void writeCompressedRecordHeader(ByteBuffer buffer,
byte magic,
int recordSize,
long timestamp,
CompressionType compressionType,
TimestampType timestampType); static long write(DataOutputStream out,
byte magic,
long timestamp,
byte[] key,
byte[] value,
CompressionType compressionType,
TimestampType timestampType); static long write(DataOutputStream out,
byte magic,
long timestamp,
ByteBuffer key,
ByteBuffer value,
CompressionType compressionType,
TimestampType timestampType); static void write(DataOutputStream out,
byte magic,
long crc,
byte attributes,
long timestamp,
byte[] key,
byte[] value); static int recordSize(byte magic, int keySize, int valueSize); static byte computeAttributes(byte magic, CompressionType type, TimestampType timestampType); static long computeChecksum(byte magic, byte attributes, long timestamp, byte[] key, byte[] value); static TimestampType timestampType(byte magic, TimestampType wrapperRecordTimestampType, byte attributes); }
|
LegacyRecord { public long checksum() { return ByteUtils.readUnsignedInt(buffer, CRC_OFFSET); } LegacyRecord(ByteBuffer buffer); LegacyRecord(ByteBuffer buffer, Long wrapperRecordTimestamp, TimestampType wrapperRecordTimestampType); long computeChecksum(); long checksum(); boolean isValid(); Long wrapperRecordTimestamp(); TimestampType wrapperRecordTimestampType(); void ensureValid(); int sizeInBytes(); int keySize(); boolean hasKey(); int valueSize(); boolean hasNullValue(); byte magic(); byte attributes(); long timestamp(); TimestampType timestampType(); CompressionType compressionType(); ByteBuffer value(); ByteBuffer key(); ByteBuffer buffer(); String toString(); boolean equals(Object other); int hashCode(); static LegacyRecord create(byte magic,
long timestamp,
byte[] key,
byte[] value,
CompressionType compressionType,
TimestampType timestampType); static LegacyRecord create(byte magic, long timestamp, byte[] key, byte[] value); static void writeCompressedRecordHeader(ByteBuffer buffer,
byte magic,
int recordSize,
long timestamp,
CompressionType compressionType,
TimestampType timestampType); static long write(DataOutputStream out,
byte magic,
long timestamp,
byte[] key,
byte[] value,
CompressionType compressionType,
TimestampType timestampType); static long write(DataOutputStream out,
byte magic,
long timestamp,
ByteBuffer key,
ByteBuffer value,
CompressionType compressionType,
TimestampType timestampType); static void write(DataOutputStream out,
byte magic,
long crc,
byte attributes,
long timestamp,
byte[] key,
byte[] value); static int recordSize(byte magic, int keySize, int valueSize); static byte computeAttributes(byte magic, CompressionType type, TimestampType timestampType); static long computeChecksum(byte magic, byte attributes, long timestamp, byte[] key, byte[] value); static TimestampType timestampType(byte magic, TimestampType wrapperRecordTimestampType, byte attributes); static final int CRC_OFFSET; static final int CRC_LENGTH; static final int MAGIC_OFFSET; static final int MAGIC_LENGTH; static final int ATTRIBUTES_OFFSET; static final int ATTRIBUTES_LENGTH; static final int TIMESTAMP_OFFSET; static final int TIMESTAMP_LENGTH; static final int KEY_SIZE_OFFSET_V0; static final int KEY_SIZE_OFFSET_V1; static final int KEY_SIZE_LENGTH; static final int KEY_OFFSET_V0; static final int KEY_OFFSET_V1; static final int VALUE_SIZE_LENGTH; static final int HEADER_SIZE_V0; static final int HEADER_SIZE_V1; static final int RECORD_OVERHEAD_V0; static final int RECORD_OVERHEAD_V1; static final long NO_TIMESTAMP; }
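A minimal sketch of the checksum round trip the test above exercises, using the create/computeAttributes/computeChecksum signatures listed in the class. The v1 magic value, CREATE_TIME timestamp type, and NONE compression are illustrative assumptions, and the sketch is placed in the org.apache.kafka.common.record package in case the static helpers are not public:

package org.apache.kafka.common.record;

import java.nio.charset.StandardCharsets;

public class LegacyRecordChecksumSketch {
    public static void main(String[] args) {
        byte[] key = "k".getBytes(StandardCharsets.UTF_8);
        byte[] value = "v".getBytes(StandardCharsets.UTF_8);
        long timestamp = 1234567890L;

        // Build a v1 record, then recompute the CRC over its contents and compare with the stored value.
        LegacyRecord record = LegacyRecord.create(RecordBatch.MAGIC_VALUE_V1, timestamp, key, value);
        byte attributes = LegacyRecord.computeAttributes(RecordBatch.MAGIC_VALUE_V1,
                CompressionType.NONE, TimestampType.CREATE_TIME);
        long expected = LegacyRecord.computeChecksum(RecordBatch.MAGIC_VALUE_V1, attributes, timestamp, key, value);

        System.out.println(record.checksum() == expected); // true
        record.ensureValid(); // would throw InvalidRecordException if the buffer were corrupted
    }
}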
|
@Test public void nullSchemaAndPrimitiveToJson() { JsonNode converted = parse(converter.fromConnectData(TOPIC, null, true)); validateEnvelopeNullSchema(converted); assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull()); assertEquals(true, converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).booleanValue()); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
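A hedged sketch of driving the converter without schemas, as the schemaless test above does; it assumes "schemas.enable" is the converter's config key and that JsonConverter lives in org.apache.kafka.connect.json (the topic name and JsonConverterSketch class are illustrative):

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.connect.json.JsonConverter;

public class JsonConverterSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // With schemas disabled the payload is serialized without the schema/payload envelope.
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        byte[] serialized = converter.fromConnectData("my-topic", null, true);
        System.out.println(new String(serialized, StandardCharsets.UTF_8)); // prints: true
    }
}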
|
@Test public void testWriteTo() throws IOException { try (FileRecords fileRecords = FileRecords.open(tempFile())) { fileRecords.append(MemoryRecords.withRecords(magic, compression, new SimpleRecord("foo".getBytes()))); fileRecords.flush(); FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes()); FileChannelRecordBatch batch = logInputStream.nextBatch(); assertNotNull(batch); assertEquals(magic, batch.magic()); ByteBuffer buffer = ByteBuffer.allocate(128); batch.writeTo(buffer); buffer.flip(); MemoryRecords memRecords = MemoryRecords.readableRecords(buffer); List<Record> records = Utils.toList(memRecords.records().iterator()); assertEquals(1, records.size()); Record record0 = records.get(0); assertTrue(record0.hasMagic(magic)); assertEquals("foo", Utils.utf8(record0.value(), record0.valueSize())); } }
|
@Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
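A minimal sketch of the write/read round trip the test above performs: append one batch to a FileRecords file, read it back through FileLogInputStream, and copy its bytes into a buffer. It assumes the sketch sits in the org.apache.kafka.common.record package (FileLogInputStream may not be accessible elsewhere); the magic value, compression, temp-file name, and class name are illustrative:

package org.apache.kafka.common.record;

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;

public class FileLogInputStreamSketch {
    public static void main(String[] args) throws IOException {
        File file = File.createTempFile("kafka-log-sketch", ".log");
        file.deleteOnExit();
        try (FileRecords fileRecords = FileRecords.open(file)) {
            // Append a single-record batch and force it to disk.
            fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2,
                    CompressionType.NONE, new SimpleRecord("foo".getBytes())));
            fileRecords.flush();

            // Scan the file region [0, size) batch by batch.
            FileLogInputStream in = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
            FileLogInputStream.FileChannelRecordBatch batch = in.nextBatch();

            // Copy the batch into a heap buffer, as the test does before re-reading it as MemoryRecords.
            ByteBuffer buffer = ByteBuffer.allocate(batch.sizeInBytes());
            batch.writeTo(buffer);
            buffer.flip();
            System.out.println(buffer.remaining()); // size in bytes of the serialized batch
        }
    }
}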
|
@Test public void testSimpleBatchIteration() throws IOException { try (FileRecords fileRecords = FileRecords.open(tempFile())) { SimpleRecord firstBatchRecord = new SimpleRecord(3241324L, "a".getBytes(), "foo".getBytes()); SimpleRecord secondBatchRecord = new SimpleRecord(234280L, "b".getBytes(), "bar".getBytes()); fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord)); fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord)); fileRecords.flush(); FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes()); FileChannelRecordBatch firstBatch = logInputStream.nextBatch(); assertGenericRecordBatchData(firstBatch, 0L, 3241324L, firstBatchRecord); assertNoProducerData(firstBatch); FileChannelRecordBatch secondBatch = logInputStream.nextBatch(); assertGenericRecordBatchData(secondBatch, 1L, 234280L, secondBatchRecord); assertNoProducerData(secondBatch); assertNull(logInputStream.nextBatch()); } }
|
@Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
|
@Test public void testBatchIterationWithMultipleRecordsPerBatch() throws IOException { if (magic < MAGIC_VALUE_V2 && compression == CompressionType.NONE) return; try (FileRecords fileRecords = FileRecords.open(tempFile())) { SimpleRecord[] firstBatchRecords = new SimpleRecord[]{ new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()), new SimpleRecord(234280L, "b".getBytes(), "2".getBytes()) }; SimpleRecord[] secondBatchRecords = new SimpleRecord[]{ new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()), new SimpleRecord(897839L, null, "4".getBytes()), new SimpleRecord(8234020L, "e".getBytes(), null) }; fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecords)); fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecords)); fileRecords.flush(); FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes()); FileChannelRecordBatch firstBatch = logInputStream.nextBatch(); assertNoProducerData(firstBatch); assertGenericRecordBatchData(firstBatch, 0L, 3241324L, firstBatchRecords); FileChannelRecordBatch secondBatch = logInputStream.nextBatch(); assertNoProducerData(secondBatch); assertGenericRecordBatchData(secondBatch, 1L, 238423489L, secondBatchRecords); assertNull(logInputStream.nextBatch()); } }
|
@Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
|
@Test public void testBatchIterationV2() throws IOException { if (magic != MAGIC_VALUE_V2) return; try (FileRecords fileRecords = FileRecords.open(tempFile())) { long producerId = 83843L; short producerEpoch = 15; int baseSequence = 234; int partitionLeaderEpoch = 9832; SimpleRecord[] firstBatchRecords = new SimpleRecord[]{ new SimpleRecord(3241324L, "a".getBytes(), "1".getBytes()), new SimpleRecord(234280L, "b".getBytes(), "2".getBytes()) }; SimpleRecord[] secondBatchRecords = new SimpleRecord[]{ new SimpleRecord(238423489L, "c".getBytes(), "3".getBytes()), new SimpleRecord(897839L, null, "4".getBytes()), new SimpleRecord(8234020L, "e".getBytes(), null) }; fileRecords.append(MemoryRecords.withIdempotentRecords(magic, 15L, compression, producerId, producerEpoch, baseSequence, partitionLeaderEpoch, firstBatchRecords)); fileRecords.append(MemoryRecords.withTransactionalRecords(magic, 27L, compression, producerId, producerEpoch, baseSequence + firstBatchRecords.length, partitionLeaderEpoch, secondBatchRecords)); fileRecords.flush(); FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes()); FileChannelRecordBatch firstBatch = logInputStream.nextBatch(); assertProducerData(firstBatch, producerId, producerEpoch, baseSequence, false, firstBatchRecords); assertGenericRecordBatchData(firstBatch, 15L, 3241324L, firstBatchRecords); assertEquals(partitionLeaderEpoch, firstBatch.partitionLeaderEpoch()); FileChannelRecordBatch secondBatch = logInputStream.nextBatch(); assertProducerData(secondBatch, producerId, producerEpoch, baseSequence + firstBatchRecords.length, true, secondBatchRecords); assertGenericRecordBatchData(secondBatch, 27L, 238423489L, secondBatchRecords); assertEquals(partitionLeaderEpoch, secondBatch.partitionLeaderEpoch()); assertNull(logInputStream.nextBatch()); } }
|
@Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
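For the v2 format the test above attaches producer and transactional metadata to each batch; a hedged sketch mirroring that call shape (producer id, epoch, sequence, and partition leader epoch values are illustrative, same package assumption as the previous sketch):

package org.apache.kafka.common.record;

import java.io.File;
import java.io.IOException;

public class TransactionalBatchSketch {
    public static void main(String[] args) throws IOException {
        File file = File.createTempFile("kafka-txn-sketch", ".log");
        file.deleteOnExit();
        try (FileRecords fileRecords = FileRecords.open(file)) {
            long producerId = 1000L;
            short producerEpoch = 1;
            int baseSequence = 0;
            int partitionLeaderEpoch = 7;

            // A transactional batch carries producer id/epoch, a base sequence, and the partition leader epoch.
            fileRecords.append(MemoryRecords.withTransactionalRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                    CompressionType.NONE, producerId, producerEpoch, baseSequence, partitionLeaderEpoch,
                    new SimpleRecord(1L, "a".getBytes(), "1".getBytes())));
            fileRecords.flush();

            FileLogInputStream in = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
            FileLogInputStream.FileChannelRecordBatch batch = in.nextBatch();
            System.out.println(batch.partitionLeaderEpoch()); // expected: 7
        }
    }
}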
|
@Test public void testBatchIterationIncompleteBatch() throws IOException { try (FileRecords fileRecords = FileRecords.open(tempFile())) { SimpleRecord firstBatchRecord = new SimpleRecord(100L, "foo".getBytes()); SimpleRecord secondBatchRecord = new SimpleRecord(200L, "bar".getBytes()); fileRecords.append(MemoryRecords.withRecords(magic, 0L, compression, CREATE_TIME, firstBatchRecord)); fileRecords.append(MemoryRecords.withRecords(magic, 1L, compression, CREATE_TIME, secondBatchRecord)); fileRecords.flush(); fileRecords.truncateTo(fileRecords.sizeInBytes() - 13); FileLogInputStream logInputStream = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes()); FileChannelRecordBatch firstBatch = logInputStream.nextBatch(); assertNoProducerData(firstBatch); assertGenericRecordBatchData(firstBatch, 0L, 100L, firstBatchRecord); assertNull(logInputStream.nextBatch()); } }
|
@Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
|
FileLogInputStream implements LogInputStream<FileLogInputStream.FileChannelRecordBatch> { @Override public FileChannelRecordBatch nextBatch() throws IOException { if (position + HEADER_SIZE_UP_TO_MAGIC >= end) return null; logHeaderBuffer.rewind(); Utils.readFullyOrFail(channel, logHeaderBuffer, position, "log header"); logHeaderBuffer.rewind(); long offset = logHeaderBuffer.getLong(OFFSET_OFFSET); int size = logHeaderBuffer.getInt(SIZE_OFFSET); if (size < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is smaller than minimum record overhead (%d).", LegacyRecord.RECORD_OVERHEAD_V0)); if (position + LOG_OVERHEAD + size > end) return null; byte magic = logHeaderBuffer.get(MAGIC_OFFSET); final FileChannelRecordBatch batch; if (magic < RecordBatch.MAGIC_VALUE_V2) batch = new LegacyFileChannelRecordBatch(offset, magic, channel, position, size); else batch = new DefaultFileChannelRecordBatch(offset, magic, channel, position, size); position += batch.sizeInBytes(); return batch; } FileLogInputStream(FileChannel channel,
int start,
int end); @Override FileChannelRecordBatch nextBatch(); }
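The incomplete-batch test above illustrates the truncation behaviour of nextBatch(): when the remaining bytes cannot hold the next complete batch it returns null instead of throwing. A minimal sketch (the 13-byte truncation mirrors the test and is otherwise arbitrary; same package assumption as above):

package org.apache.kafka.common.record;

import java.io.File;
import java.io.IOException;

public class IncompleteBatchSketch {
    public static void main(String[] args) throws IOException {
        File file = File.createTempFile("kafka-truncated-sketch", ".log");
        file.deleteOnExit();
        try (FileRecords fileRecords = FileRecords.open(file)) {
            fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                    CompressionType.NONE, TimestampType.CREATE_TIME, new SimpleRecord(100L, "foo".getBytes())));
            fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 1L,
                    CompressionType.NONE, TimestampType.CREATE_TIME, new SimpleRecord(200L, "bar".getBytes())));
            fileRecords.flush();

            // Chop a few bytes off the tail so the second batch can no longer be read completely.
            fileRecords.truncateTo(fileRecords.sizeInBytes() - 13);

            FileLogInputStream in = new FileLogInputStream(fileRecords.channel(), 0, fileRecords.sizeInBytes());
            System.out.println(in.nextBatch() != null); // true: the first batch is intact
            System.out.println(in.nextBatch() == null); // true: the partial second batch is skipped
        }
    }
}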
|
@Test(expected = InvalidRecordException.class) public void testInvalidKeySize() { byte attributes = 0; long timestampDelta = 2; int offsetDelta = 1; int sizeOfBodyInBytes = 100; int keySize = 105; ByteBuffer buf = ByteBuffer.allocate(sizeOfBodyInBytes + ByteUtils.sizeOfVarint(sizeOfBodyInBytes)); ByteUtils.writeVarint(sizeOfBodyInBytes, buf); buf.put(attributes); ByteUtils.writeVarlong(timestampDelta, buf); ByteUtils.writeVarint(offsetDelta, buf); ByteUtils.writeVarint(keySize, buf); buf.position(buf.limit()); buf.flip(); DefaultRecord.readFrom(buf, 0L, 0L, RecordBatch.NO_SEQUENCE, null); }
|
public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); static final int MAX_RECORD_OVERHEAD; }
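Beyond the malformed-buffer case above, the writeTo/readFrom pair listed in the class might be exercised in a simple round trip; a hedged sketch, assuming the sketch sits in the org.apache.kafka.common.record package and using an empty header array (base offset/timestamp and deltas are illustrative):

package org.apache.kafka.common.record;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.kafka.common.header.Header;

public class DefaultRecordRoundTripSketch {
    public static void main(String[] args) throws IOException {
        int offsetDelta = 1;
        long timestampDelta = 2;
        ByteBuffer key = ByteBuffer.wrap("k".getBytes(StandardCharsets.UTF_8));
        ByteBuffer value = ByteBuffer.wrap("v".getBytes(StandardCharsets.UTF_8));

        // Serialize one record: a varint length prefix followed by attributes, deltas, key, value, headers.
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DefaultRecord.writeTo(new DataOutputStream(bytes), offsetDelta, timestampDelta, key, value, new Header[0]);

        // Read it back relative to a base offset and timestamp, as a batch reader would.
        DefaultRecord record = DefaultRecord.readFrom(ByteBuffer.wrap(bytes.toByteArray()),
                10L, 1000L, RecordBatch.NO_SEQUENCE, null);
        System.out.println(record.offset());    // 11 = base offset + offset delta
        System.out.println(record.timestamp()); // 1002 = base timestamp + timestamp delta
    }
}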
|
@Test(expected = InvalidRecordException.class) public void testInvalidValueSize() throws IOException { byte attributes = 0; long timestampDelta = 2; int offsetDelta = 1; int sizeOfBodyInBytes = 100; int valueSize = 105; ByteBuffer buf = ByteBuffer.allocate(sizeOfBodyInBytes + ByteUtils.sizeOfVarint(sizeOfBodyInBytes)); ByteUtils.writeVarint(sizeOfBodyInBytes, buf); buf.put(attributes); ByteUtils.writeVarlong(timestampDelta, buf); ByteUtils.writeVarint(offsetDelta, buf); ByteUtils.writeVarint(-1, buf); ByteUtils.writeVarint(valueSize, buf); buf.position(buf.limit()); buf.flip(); DefaultRecord.readFrom(buf, 0L, 0L, RecordBatch.NO_SEQUENCE, null); }
|
public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); static final int MAX_RECORD_OVERHEAD; }
|
@Test(expected = InvalidRecordException.class) public void testUnderflowReadingTimestamp() { byte attributes = 0; int sizeOfBodyInBytes = 1; ByteBuffer buf = ByteBuffer.allocate(sizeOfBodyInBytes + ByteUtils.sizeOfVarint(sizeOfBodyInBytes)); ByteUtils.writeVarint(sizeOfBodyInBytes, buf); buf.put(attributes); buf.flip(); DefaultRecord.readFrom(buf, 0L, 0L, RecordBatch.NO_SEQUENCE, null); }
|
public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); static final int MAX_RECORD_OVERHEAD; }
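A minimal standalone sketch of what this row exercises, assuming the usual org.apache.kafka.common.record and org.apache.kafka.common.utils package locations (the class name UnderflowSketch is illustrative): the record body is a varint length prefix followed by the body bytes, and a one-byte body holds only the attributes byte, so the parser runs out of input before the timestamp-delta varlong and is expected to reject the record rather than underflow.

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.DefaultRecord;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.utils.ByteUtils;

public class UnderflowSketch {
    public static void main(String[] args) {
        int sizeOfBodyInBytes = 1;                                   // body claims one byte: attributes only
        ByteBuffer buf = ByteBuffer.allocate(
                ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes);
        ByteUtils.writeVarint(sizeOfBodyInBytes, buf);               // varint length prefix
        buf.put((byte) 0);                                           // attributes byte; no timestamp delta follows
        buf.flip();

        try {
            DefaultRecord.readFrom(buf, 0L, 0L, RecordBatch.NO_SEQUENCE, null);
        } catch (RuntimeException e) {
            System.out.println("rejected as expected: " + e);        // InvalidRecordException in this row's test
        }
    }
}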
|
@Test public void nullSchemaAndArrayToJson() { JsonNode converted = parse(converter.fromConnectData(TOPIC, null, Arrays.asList(1, "string", true))); validateEnvelopeNullSchema(converted); assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull()); assertEquals(JsonNodeFactory.instance.arrayNode().add(1).add("string").add(true), converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
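A hedged usage sketch for this row, assuming the standard schemas.enable converter property and the org.apache.kafka.connect.json package: with schemas enabled but a null schema, the converter still wraps the value in its schema/payload envelope, serializing the List as a JSON array and the schema field as JSON null.

import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Collections;

import org.apache.kafka.connect.json.JsonConverter;

public class SchemalessArraySketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // schemas.enable defaults to true; set it explicitly here for clarity (value converter, so isKey = false)
        converter.configure(Collections.singletonMap("schemas.enable", "true"), false);

        byte[] json = converter.fromConnectData("my-topic", null, Arrays.asList(1, "string", true));
        // Expected shape: {"schema":null,"payload":[1,"string",true]}
        System.out.println(new String(json, StandardCharsets.UTF_8));
    }
}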
|
@Test(expected = InvalidRecordException.class) public void testUnderflowReadingVarlong() { byte attributes = 0; int sizeOfBodyInBytes = 2; ByteBuffer buf = ByteBuffer.allocate(sizeOfBodyInBytes + ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + 1); ByteUtils.writeVarint(sizeOfBodyInBytes, buf); buf.put(attributes); ByteUtils.writeVarlong(156, buf); buf.position(buf.limit() - 1); buf.flip(); DefaultRecord.readFrom(buf, 0L, 0L, RecordBatch.NO_SEQUENCE, null); }
|
public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); static final int MAX_RECORD_OVERHEAD; }
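A sketch of the truncated-varlong layout this row builds, using only the ByteUtils.writeVarlong call that appears above; the class name and the 16-byte scratch buffer are illustrative choices.

import java.nio.ByteBuffer;

import org.apache.kafka.common.utils.ByteUtils;

public class TruncatedVarlongSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        buf.put((byte) 0);                    // attributes byte, as in the record body layout
        int before = buf.position();
        ByteUtils.writeVarlong(156, buf);     // timestamp delta; zig-zags to 312, so two bytes
        System.out.println("varlong(156) occupies " + (buf.position() - before) + " bytes");

        buf.flip();
        buf.limit(buf.limit() - 1);           // drop the last byte: the varlong is now incomplete
        buf.get();                            // consume the attributes byte
        // A reader that now pulls the timestamp-delta varlong runs out of bytes; this is the
        // underflow that DefaultRecord.readFrom surfaces as InvalidRecordException in this row's test.
    }
}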
|
@Test(expected = InvalidRecordException.class) public void testInvalidVarlong() { byte attributes = 0; int sizeOfBodyInBytes = 11; ByteBuffer buf = ByteBuffer.allocate(sizeOfBodyInBytes + ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + 1); ByteUtils.writeVarint(sizeOfBodyInBytes, buf); int recordStartPosition = buf.position(); buf.put(attributes); ByteUtils.writeVarlong(Long.MAX_VALUE, buf); buf.put(recordStartPosition + 10, Byte.MIN_VALUE); buf.flip(); DefaultRecord.readFrom(buf, 0L, 0L, RecordBatch.NO_SEQUENCE, null); }
|
public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); }
|
DefaultRecord implements Record { public static DefaultRecord readFrom(DataInput input, long baseOffset, long baseTimestamp, int baseSequence, Long logAppendTime) throws IOException { int sizeOfBodyInBytes = ByteUtils.readVarint(input); ByteBuffer recordBuffer = ByteBuffer.allocate(sizeOfBodyInBytes); input.readFully(recordBuffer.array(), 0, sizeOfBodyInBytes); int totalSizeInBytes = ByteUtils.sizeOfVarint(sizeOfBodyInBytes) + sizeOfBodyInBytes; return readFrom(recordBuffer, totalSizeInBytes, sizeOfBodyInBytes, baseOffset, baseTimestamp, baseSequence, logAppendTime); } private DefaultRecord(int sizeInBytes,
byte attributes,
long offset,
long timestamp,
int sequence,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override long offset(); @Override int sequence(); @Override int sizeInBytes(); @Override long timestamp(); byte attributes(); @Override Long checksumOrNull(); @Override boolean isValid(); @Override void ensureValid(); @Override int keySize(); @Override int valueSize(); @Override boolean hasKey(); @Override ByteBuffer key(); @Override boolean hasValue(); @Override ByteBuffer value(); @Override Header[] headers(); static int writeTo(DataOutputStream out,
int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); @Override boolean hasMagic(byte magic); @Override boolean isCompressed(); @Override boolean hasTimestampType(TimestampType timestampType); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static DefaultRecord readFrom(DataInput input,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static DefaultRecord readFrom(ByteBuffer buffer,
long baseOffset,
long baseTimestamp,
int baseSequence,
Long logAppendTime); static int sizeInBytes(int offsetDelta,
long timestampDelta,
ByteBuffer key,
ByteBuffer value,
Header[] headers); static int sizeInBytes(int offsetDelta,
long timestampDelta,
int keySize,
int valueSize,
Header[] headers); static long computePartialChecksum(long timestamp, int serializedKeySize, int serializedValueSize); static final int MAX_RECORD_OVERHEAD; }
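A sketch of why an over-long varlong is rejected, assuming ByteUtils.readVarlong(ByteBuffer) is available alongside the writeVarlong used above: a varlong may span at most 10 bytes, and forcing the continuation bit (0x80) onto the 10th byte pushes the encoding past that limit.

import java.nio.ByteBuffer;

import org.apache.kafka.common.utils.ByteUtils;

public class OverlongVarlongSketch {
    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(16);
        int start = buf.position();
        ByteUtils.writeVarlong(Long.MAX_VALUE, buf);   // the zig-zagged value needs the full 10 bytes
        System.out.println("bytes written: " + (buf.position() - start));

        buf.put(start + 9, Byte.MIN_VALUE);            // 0x80: keep the continuation bit set on byte 10
        buf.flip();
        try {
            ByteUtils.readVarlong(buf);                // an 11-byte varlong is illegal
        } catch (RuntimeException e) {                 // expected rejection; DefaultRecord.readFrom reports
            System.out.println("rejected: " + e);      // this condition as InvalidRecordException in the test
        }
    }
}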
|
@Test public void testIterator() { ByteBuffer buffer = ByteBuffer.allocate(1024); MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, magic, compression, TimestampType.CREATE_TIME, firstOffset, logAppendTime, pid, epoch, firstSequence, false, false, partitionLeaderEpoch, buffer.limit()); SimpleRecord[] records = new SimpleRecord[] { new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), new SimpleRecord(3L, "c".getBytes(), "3".getBytes()), new SimpleRecord(4L, null, "4".getBytes()), new SimpleRecord(5L, "d".getBytes(), null), new SimpleRecord(6L, (byte[]) null, null) }; for (SimpleRecord record : records) builder.append(record); MemoryRecords memoryRecords = builder.build(); for (int iteration = 0; iteration < 2; iteration++) { int total = 0; for (RecordBatch batch : memoryRecords.batches()) { assertTrue(batch.isValid()); assertEquals(compression, batch.compressionType()); assertEquals(firstOffset + total, batch.baseOffset()); if (magic >= RecordBatch.MAGIC_VALUE_V2) { assertEquals(pid, batch.producerId()); assertEquals(epoch, batch.producerEpoch()); assertEquals(firstSequence + total, batch.baseSequence()); assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch()); assertEquals(records.length, batch.countOrNull().intValue()); assertEquals(TimestampType.CREATE_TIME, batch.timestampType()); assertEquals(records[records.length - 1].timestamp(), batch.maxTimestamp()); } else { assertEquals(RecordBatch.NO_PRODUCER_ID, batch.producerId()); assertEquals(RecordBatch.NO_PRODUCER_EPOCH, batch.producerEpoch()); assertEquals(RecordBatch.NO_SEQUENCE, batch.baseSequence()); assertEquals(RecordBatch.NO_PARTITION_LEADER_EPOCH, batch.partitionLeaderEpoch()); assertNull(batch.countOrNull()); if (magic == RecordBatch.MAGIC_VALUE_V0) assertEquals(TimestampType.NO_TIMESTAMP_TYPE, batch.timestampType()); else assertEquals(TimestampType.CREATE_TIME, batch.timestampType()); } int recordCount = 0; for (Record record : batch) { assertTrue(record.isValid()); assertTrue(record.hasMagic(batch.magic())); assertFalse(record.isCompressed()); assertEquals(firstOffset + total, record.offset()); assertEquals(records[total].key(), record.key()); assertEquals(records[total].value(), record.value()); if (magic >= RecordBatch.MAGIC_VALUE_V2) assertEquals(firstSequence + total, record.sequence()); assertFalse(record.hasTimestampType(TimestampType.LOG_APPEND_TIME)); if (magic == RecordBatch.MAGIC_VALUE_V0) { assertEquals(RecordBatch.NO_TIMESTAMP, record.timestamp()); assertFalse(record.hasTimestampType(TimestampType.CREATE_TIME)); assertTrue(record.hasTimestampType(TimestampType.NO_TIMESTAMP_TYPE)); } else { assertEquals(records[total].timestamp(), record.timestamp()); assertFalse(record.hasTimestampType(TimestampType.NO_TIMESTAMP_TYPE)); if (magic < RecordBatch.MAGIC_VALUE_V2) assertTrue(record.hasTimestampType(TimestampType.CREATE_TIME)); else assertFalse(record.hasTimestampType(TimestampType.CREATE_TIME)); } total++; recordCount++; } assertEquals(batch.baseOffset() + recordCount - 1, batch.lastOffset()); } } }
|
@Override public Iterable<MutableRecordBatch> batches() { return batches; }
|
MemoryRecords extends AbstractRecords { @Override public Iterable<MutableRecordBatch> batches() { return batches; } }
|
MemoryRecords extends AbstractRecords { @Override public Iterable<MutableRecordBatch> batches() { return batches; } private MemoryRecords(ByteBuffer buffer); }
|
MemoryRecords extends AbstractRecords { @Override public Iterable<MutableRecordBatch> batches() { return batches; } private MemoryRecords(ByteBuffer buffer); @Override int sizeInBytes(); @Override long writeTo(GatheringByteChannel channel, long position, int length); int writeFullyTo(GatheringByteChannel channel); int validBytes(); @Override MemoryRecords downConvert(byte toMagic, long firstOffset); FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer,
int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier); ByteBuffer buffer(); @Override Iterable<MutableRecordBatch> batches(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static MemoryRecords readableRecords(ByteBuffer buffer); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch); static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, long producerId, short producerEpoch,
int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
SimpleRecord ... records); static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch,
long producerId, short producerEpoch,
EndTransactionMarker marker); static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
int partitionLeaderEpoch, long producerId, short producerEpoch,
EndTransactionMarker marker); }
|
MemoryRecords extends AbstractRecords { @Override public Iterable<MutableRecordBatch> batches() { return batches; } private MemoryRecords(ByteBuffer buffer); @Override int sizeInBytes(); @Override long writeTo(GatheringByteChannel channel, long position, int length); int writeFullyTo(GatheringByteChannel channel); int validBytes(); @Override MemoryRecords downConvert(byte toMagic, long firstOffset); FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer,
int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier); ByteBuffer buffer(); @Override Iterable<MutableRecordBatch> batches(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static MemoryRecords readableRecords(ByteBuffer buffer); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch); static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, long producerId, short producerEpoch,
int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
SimpleRecord ... records); static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch,
long producerId, short producerEpoch,
EndTransactionMarker marker); static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
int partitionLeaderEpoch, long producerId, short producerEpoch,
EndTransactionMarker marker); static final MemoryRecords EMPTY; }
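A reduced build-then-iterate sketch of the round trip this row checks, restricted to builder, batch and record calls listed in the signatures above; the magic value defaults to the current format via the four-argument builder overload, and the compression, timestamps and payloads are arbitrary example choices.

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class BatchIterationSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(1L, "a".getBytes(), "1".getBytes());
        builder.append(2L, "b".getBytes(), "2".getBytes());
        MemoryRecords records = builder.build();

        for (MutableRecordBatch batch : records.batches()) {         // outer loop: record batches
            System.out.println("batch " + batch.baseOffset() + ".." + batch.lastOffset());
            for (Record record : batch) {                            // inner loop: individual records
                System.out.println("  offset=" + record.offset() + " ts=" + record.timestamp());
            }
        }
    }
}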
|
@Test public void testHasRoomForMethod() { MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), magic, compression, TimestampType.CREATE_TIME, 0L); builder.append(0L, "a".getBytes(), "1".getBytes()); assertTrue(builder.hasRoomFor(1L, "b".getBytes(), "2".getBytes(), Record.EMPTY_HEADERS)); builder.close(); assertFalse(builder.hasRoomFor(1L, "b".getBytes(), "2".getBytes(), Record.EMPTY_HEADERS)); }
|
public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); }
|
MemoryRecords extends AbstractRecords { public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); } }
|
MemoryRecords extends AbstractRecords { public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); } private MemoryRecords(ByteBuffer buffer); }
|
MemoryRecords extends AbstractRecords { public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); } private MemoryRecords(ByteBuffer buffer); @Override int sizeInBytes(); @Override long writeTo(GatheringByteChannel channel, long position, int length); int writeFullyTo(GatheringByteChannel channel); int validBytes(); @Override MemoryRecords downConvert(byte toMagic, long firstOffset); FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer,
int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier); ByteBuffer buffer(); @Override Iterable<MutableRecordBatch> batches(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static MemoryRecords readableRecords(ByteBuffer buffer); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch); static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, long producerId, short producerEpoch,
int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
SimpleRecord ... records); static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch,
long producerId, short producerEpoch,
EndTransactionMarker marker); static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
int partitionLeaderEpoch, long producerId, short producerEpoch,
EndTransactionMarker marker); }
|
MemoryRecords extends AbstractRecords { public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); } private MemoryRecords(ByteBuffer buffer); @Override int sizeInBytes(); @Override long writeTo(GatheringByteChannel channel, long position, int length); int writeFullyTo(GatheringByteChannel channel); int validBytes(); @Override MemoryRecords downConvert(byte toMagic, long firstOffset); FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer,
int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier); ByteBuffer buffer(); @Override Iterable<MutableRecordBatch> batches(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static MemoryRecords readableRecords(ByteBuffer buffer); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch); static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, long producerId, short producerEpoch,
int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
SimpleRecord ... records); static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch,
long producerId, short producerEpoch,
EndTransactionMarker marker); static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
int partitionLeaderEpoch, long producerId, short producerEpoch,
EndTransactionMarker marker); static final MemoryRecords EMPTY; }
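A sketch of the hasRoomFor contract this row relies on, using the same four-argument builder overload: the check is a size estimate against the builder's write limit while the builder is open, and is unconditionally false once the builder has been closed.

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.TimestampType;

public class HasRoomForSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        builder.append(0L, "a".getBytes(), "1".getBytes());

        // Open builder with spare capacity: the estimate says another small record fits.
        System.out.println(builder.hasRoomFor(1L, "b".getBytes(), "2".getBytes(), Record.EMPTY_HEADERS));

        builder.close();
        // A closed builder accepts no further appends, so the same check now reports false.
        System.out.println(builder.hasRoomFor(1L, "b".getBytes(), "2".getBytes(), Record.EMPTY_HEADERS));
    }
}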
|
@Test public void testHasRoomForMethodWithHeaders() { if (magic >= RecordBatch.MAGIC_VALUE_V2) { MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(100), magic, compression, TimestampType.CREATE_TIME, 0L); RecordHeaders headers = new RecordHeaders(); headers.add("hello", "world.world".getBytes()); headers.add("hello", "world.world".getBytes()); headers.add("hello", "world.world".getBytes()); headers.add("hello", "world.world".getBytes()); headers.add("hello", "world.world".getBytes()); builder.append(logAppendTime, "key".getBytes(), "value".getBytes()); assertTrue(builder.hasRoomFor(logAppendTime, "key".getBytes(), "value".getBytes(), Record.EMPTY_HEADERS)); assertFalse(builder.hasRoomFor(logAppendTime, "key".getBytes(), "value".getBytes(), headers.toArray())); } }
|
public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); }
|
MemoryRecords extends AbstractRecords { public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); } }
|
MemoryRecords extends AbstractRecords { public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); } private MemoryRecords(ByteBuffer buffer); }
|
MemoryRecords extends AbstractRecords { public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); } private MemoryRecords(ByteBuffer buffer); @Override int sizeInBytes(); @Override long writeTo(GatheringByteChannel channel, long position, int length); int writeFullyTo(GatheringByteChannel channel); int validBytes(); @Override MemoryRecords downConvert(byte toMagic, long firstOffset); FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer,
int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier); ByteBuffer buffer(); @Override Iterable<MutableRecordBatch> batches(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static MemoryRecords readableRecords(ByteBuffer buffer); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch); static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, long producerId, short producerEpoch,
int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
SimpleRecord ... records); static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch,
long producerId, short producerEpoch,
EndTransactionMarker marker); static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
int partitionLeaderEpoch, long producerId, short producerEpoch,
EndTransactionMarker marker); }
|
MemoryRecords extends AbstractRecords { public static MemoryRecordsBuilder builder(ByteBuffer buffer, CompressionType compressionType, TimestampType timestampType, long baseOffset) { return builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, timestampType, baseOffset); } private MemoryRecords(ByteBuffer buffer); @Override int sizeInBytes(); @Override long writeTo(GatheringByteChannel channel, long position, int length); int writeFullyTo(GatheringByteChannel channel); int validBytes(); @Override MemoryRecords downConvert(byte toMagic, long firstOffset); FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer,
int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier); ByteBuffer buffer(); @Override Iterable<MutableRecordBatch> batches(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static MemoryRecords readableRecords(ByteBuffer buffer); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch); static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, long producerId, short producerEpoch,
int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
SimpleRecord ... records); static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch,
long producerId, short producerEpoch,
EndTransactionMarker marker); static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
int partitionLeaderEpoch, long producerId, short producerEpoch,
EndTransactionMarker marker); static final MemoryRecords EMPTY; }
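The MemoryRecords listing above is dominated by factory overloads, so a small usage sketch may help when reading it. Everything below uses only members shown in the listing (withRecords, sizeInBytes, batches, and the per-batch/per-record accessors); the class and variable names of the sketch itself are illustrative.
// Minimal sketch: wrap a few SimpleRecords into an in-memory record set and
// walk the resulting batches. Assumes the org.apache.kafka.common.record
// classes listed above are on the classpath.
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.SimpleRecord;

public class MemoryRecordsSketch {
    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        MemoryRecords records = MemoryRecords.withRecords(CompressionType.NONE,
                new SimpleRecord(now, "k1".getBytes(), "v1".getBytes()),
                new SimpleRecord(now + 1, "k2".getBytes(), "v2".getBytes()));

        System.out.println("total bytes: " + records.sizeInBytes());
        for (MutableRecordBatch batch : records.batches()) {
            System.out.println("batch " + batch.baseOffset() + ".." + batch.lastOffset());
            for (Record record : batch)
                System.out.println("  offset " + record.offset() + ", timestamp " + record.timestamp());
        }
    }
}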
|
@Test public void testFilterTo() { ByteBuffer buffer = ByteBuffer.allocate(2048); MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME, 0L); builder.append(10L, null, "a".getBytes()); builder.close(); builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME, 1L); builder.append(11L, "1".getBytes(), "b".getBytes()); builder.append(12L, null, "c".getBytes()); builder.close(); builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME, 3L); builder.append(13L, null, "d".getBytes()); builder.append(20L, "4".getBytes(), "e".getBytes()); builder.append(15L, "5".getBytes(), "f".getBytes()); builder.close(); builder = MemoryRecords.builder(buffer, magic, compression, TimestampType.CREATE_TIME, 6L); builder.append(16L, "6".getBytes(), "g".getBytes()); builder.close(); buffer.flip(); ByteBuffer filtered = ByteBuffer.allocate(2048); MemoryRecords.FilterResult result = MemoryRecords.readableRecords(buffer).filterTo( new TopicPartition("foo", 0), new RetainNonNullKeysFilter(), filtered, Integer.MAX_VALUE, BufferSupplier.NO_CACHING); filtered.flip(); assertEquals(7, result.messagesRead); assertEquals(4, result.messagesRetained); assertEquals(buffer.limit(), result.bytesRead); assertEquals(filtered.limit(), result.bytesRetained); if (magic > RecordBatch.MAGIC_VALUE_V0) { assertEquals(20L, result.maxTimestamp); if (compression == CompressionType.NONE && magic < RecordBatch.MAGIC_VALUE_V2) assertEquals(4L, result.shallowOffsetOfMaxTimestamp); else assertEquals(5L, result.shallowOffsetOfMaxTimestamp); } MemoryRecords filteredRecords = MemoryRecords.readableRecords(filtered); List<MutableRecordBatch> batches = TestUtils.toList(filteredRecords.batches()); final List<Long> expectedEndOffsets; final List<Long> expectedStartOffsets; final List<Long> expectedMaxTimestamps; if (magic < RecordBatch.MAGIC_VALUE_V2 && compression == CompressionType.NONE) { expectedEndOffsets = asList(1L, 4L, 5L, 6L); expectedStartOffsets = asList(1L, 4L, 5L, 6L); expectedMaxTimestamps = asList(11L, 20L, 15L, 16L); } else if (magic < RecordBatch.MAGIC_VALUE_V2) { expectedEndOffsets = asList(1L, 5L, 6L); expectedStartOffsets = asList(1L, 4L, 6L); expectedMaxTimestamps = asList(11L, 20L, 16L); } else { expectedEndOffsets = asList(2L, 5L, 6L); expectedStartOffsets = asList(1L, 3L, 6L); expectedMaxTimestamps = asList(11L, 20L, 16L); } assertEquals(expectedEndOffsets.size(), batches.size()); for (int i = 0; i < expectedEndOffsets.size(); i++) { RecordBatch batch = batches.get(i); assertEquals(expectedStartOffsets.get(i).longValue(), batch.baseOffset()); assertEquals(expectedEndOffsets.get(i).longValue(), batch.lastOffset()); assertEquals(magic, batch.magic()); assertEquals(compression, batch.compressionType()); if (magic >= RecordBatch.MAGIC_VALUE_V1) { assertEquals(expectedMaxTimestamps.get(i).longValue(), batch.maxTimestamp()); assertEquals(TimestampType.CREATE_TIME, batch.timestampType()); } else { assertEquals(RecordBatch.NO_TIMESTAMP, batch.maxTimestamp()); assertEquals(TimestampType.NO_TIMESTAMP_TYPE, batch.timestampType()); } } List<Record> records = TestUtils.toList(filteredRecords.records()); assertEquals(4, records.size()); Record first = records.get(0); assertEquals(1L, first.offset()); if (magic > RecordBatch.MAGIC_VALUE_V0) assertEquals(11L, first.timestamp()); assertEquals("1", Utils.utf8(first.key(), first.keySize())); assertEquals("b", Utils.utf8(first.value(), first.valueSize())); Record second = records.get(1); 
assertEquals(4L, second.offset()); if (magic > RecordBatch.MAGIC_VALUE_V0) assertEquals(20L, second.timestamp()); assertEquals("4", Utils.utf8(second.key(), second.keySize())); assertEquals("e", Utils.utf8(second.value(), second.valueSize())); Record third = records.get(2); assertEquals(5L, third.offset()); if (magic > RecordBatch.MAGIC_VALUE_V0) assertEquals(15L, third.timestamp()); assertEquals("5", Utils.utf8(third.key(), third.keySize())); assertEquals("f", Utils.utf8(third.value(), third.valueSize())); Record fourth = records.get(3); assertEquals(6L, fourth.offset()); if (magic > RecordBatch.MAGIC_VALUE_V0) assertEquals(16L, fourth.timestamp()); assertEquals("6", Utils.utf8(fourth.key(), fourth.keySize())); assertEquals("g", Utils.utf8(fourth.value(), fourth.valueSize())); }
|
public FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { return filterTo(partition, batches(), filter, destinationBuffer, maxRecordBatchSize, decompressionBufferSupplier); }
|
MemoryRecords extends AbstractRecords { public FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { return filterTo(partition, batches(), filter, destinationBuffer, maxRecordBatchSize, decompressionBufferSupplier); } }
|
MemoryRecords extends AbstractRecords { public FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { return filterTo(partition, batches(), filter, destinationBuffer, maxRecordBatchSize, decompressionBufferSupplier); } private MemoryRecords(ByteBuffer buffer); }
|
MemoryRecords extends AbstractRecords { public FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { return filterTo(partition, batches(), filter, destinationBuffer, maxRecordBatchSize, decompressionBufferSupplier); } private MemoryRecords(ByteBuffer buffer); @Override int sizeInBytes(); @Override long writeTo(GatheringByteChannel channel, long position, int length); int writeFullyTo(GatheringByteChannel channel); int validBytes(); @Override MemoryRecords downConvert(byte toMagic, long firstOffset); FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer,
int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier); ByteBuffer buffer(); @Override Iterable<MutableRecordBatch> batches(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static MemoryRecords readableRecords(ByteBuffer buffer); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch); static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, long producerId, short producerEpoch,
int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
SimpleRecord ... records); static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch,
long producerId, short producerEpoch,
EndTransactionMarker marker); static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
int partitionLeaderEpoch, long producerId, short producerEpoch,
EndTransactionMarker marker); }
|
MemoryRecords extends AbstractRecords { public FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer, int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier) { return filterTo(partition, batches(), filter, destinationBuffer, maxRecordBatchSize, decompressionBufferSupplier); } private MemoryRecords(ByteBuffer buffer); @Override int sizeInBytes(); @Override long writeTo(GatheringByteChannel channel, long position, int length); int writeFullyTo(GatheringByteChannel channel); int validBytes(); @Override MemoryRecords downConvert(byte toMagic, long firstOffset); FilterResult filterTo(TopicPartition partition, RecordFilter filter, ByteBuffer destinationBuffer,
int maxRecordBatchSize, BufferSupplier decompressionBufferSupplier); ByteBuffer buffer(); @Override Iterable<MutableRecordBatch> batches(); @Override String toString(); @Override boolean equals(Object o); @Override int hashCode(); static MemoryRecords readableRecords(ByteBuffer buffer); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder idempotentBuilder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
CompressionType compressionType,
long baseOffset,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
int partitionLeaderEpoch); static MemoryRecordsBuilder builder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch); static MemoryRecords withRecords(CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(CompressionType compressionType, int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withRecords(byte magic, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, SimpleRecord... records); static MemoryRecords withRecords(long initialOffset, CompressionType compressionType, Integer partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withIdempotentRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withIdempotentRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withTransactionalRecords(CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, SimpleRecord... records); static MemoryRecords withTransactionalRecords(byte magic, long initialOffset, CompressionType compressionType,
long producerId, short producerEpoch, int baseSequence,
int partitionLeaderEpoch, SimpleRecord... records); static MemoryRecords withTransactionalRecords(long initialOffset, CompressionType compressionType, long producerId,
short producerEpoch, int baseSequence, int partitionLeaderEpoch,
SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, SimpleRecord... records); static MemoryRecords withRecords(byte magic, long initialOffset, CompressionType compressionType,
TimestampType timestampType, long producerId, short producerEpoch,
int baseSequence, int partitionLeaderEpoch, boolean isTransactional,
SimpleRecord ... records); static MemoryRecords withEndTransactionMarker(long producerId, short producerEpoch, EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long timestamp, long producerId, short producerEpoch,
EndTransactionMarker marker); static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch,
long producerId, short producerEpoch,
EndTransactionMarker marker); static void writeEndTransactionalMarker(ByteBuffer buffer, long initialOffset, long timestamp,
int partitionLeaderEpoch, long producerId, short producerEpoch,
EndTransactionMarker marker); static final MemoryRecords EMPTY; }
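The test above spends most of its setup on the write/read cycle that filterTo operates on: several batches appended into one buffer at explicit base offsets, then read back and inspected. A compact sketch of just that cycle follows, using only calls that appear in the listings (builder, append, close, readableRecords, batches); the RecordFilter passed to filterTo is omitted because RetainNonNullKeysFilter is a test-local helper whose definition is not shown here.
// Write two batches into one buffer with explicit base offsets, then read the
// batches back. This is the layout filterTo consumes.
import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.MutableRecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class FilterToSetupSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(2048);

        MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE,
                TimestampType.CREATE_TIME, 0L);
        builder.append(10L, null, "a".getBytes());
        builder.close();

        builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 1L);
        builder.append(11L, "1".getBytes(), "b".getBytes());
        builder.close();

        buffer.flip();
        for (MutableRecordBatch batch : MemoryRecords.readableRecords(buffer).batches())
            System.out.println("batch " + batch.baseOffset() + ".." + batch.lastOffset());
    }
}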
|
@Test(expected = CorruptRecordException.class) public void iteratorRaisesOnTooSmallRecords() throws IOException { ByteBuffer buffer = ByteBuffer.allocate(1024); MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); builder.append(15L, "a".getBytes(), "1".getBytes()); builder.append(20L, "b".getBytes(), "2".getBytes()); builder.close(); int position = buffer.position(); builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 2L); builder.append(30L, "c".getBytes(), "3".getBytes()); builder.append(40L, "d".getBytes(), "4".getBytes()); builder.close(); buffer.flip(); buffer.putInt(position + DefaultRecordBatch.LENGTH_OFFSET, 9); ByteBufferLogInputStream logInputStream = new ByteBufferLogInputStream(buffer, Integer.MAX_VALUE); assertNotNull(logInputStream.nextBatch()); logInputStream.nextBatch(); }
|
public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); MutableRecordBatch nextBatch(); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); MutableRecordBatch nextBatch(); }
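ByteBufferLogInputStream is a package-level helper rather than public API, so the sketch below assumes it is invoked from code in the same org.apache.kafka.common.record package, as the test does. It simply drains complete batches until nextBatch() returns null; malformed sizes or magic bytes surface as CorruptRecordException from nextBatch itself, exactly as the focal method shows.
// Drain a buffer of record batches. A null return means no complete batch is
// left in the buffer; corrupt headers are reported by nextBatch() directly.
import java.io.IOException;
import java.nio.ByteBuffer;

class LogInputStreamSketch {
    static int countBatches(ByteBuffer buffer, int maxMessageSize) throws IOException {
        ByteBufferLogInputStream in = new ByteBufferLogInputStream(buffer, maxMessageSize);
        int count = 0;
        while (in.nextBatch() != null)
            count++;
        return count;
    }
}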
|
@Test(expected = CorruptRecordException.class) public void iteratorRaisesOnInvalidMagic() throws IOException { ByteBuffer buffer = ByteBuffer.allocate(1024); MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); builder.append(15L, "a".getBytes(), "1".getBytes()); builder.append(20L, "b".getBytes(), "2".getBytes()); builder.close(); int position = buffer.position(); builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 2L); builder.append(30L, "c".getBytes(), "3".getBytes()); builder.append(40L, "d".getBytes(), "4".getBytes()); builder.close(); buffer.flip(); buffer.put(position + DefaultRecordBatch.MAGIC_OFFSET, (byte) 37); ByteBufferLogInputStream logInputStream = new ByteBufferLogInputStream(buffer, Integer.MAX_VALUE); assertNotNull(logInputStream.nextBatch()); logInputStream.nextBatch(); }
|
public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); MutableRecordBatch nextBatch(); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); MutableRecordBatch nextBatch(); }
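The invalid-magic case above is one half of the picture; at read time the magic byte also decides whether a batch needs down-conversion for older readers. The sketch below only illustrates that idea using members shown in the listings (magic, downConvert); the method name and the toMagic parameter are assumptions, not broker code.
// Illustrative only: inspect the first batch's magic and down-convert the whole
// record set when it is newer than the format the caller can handle.
import java.util.Iterator;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MutableRecordBatch;

class DownConvertSketch {
    static MemoryRecords maybeDownConvert(MemoryRecords records, byte toMagic, long firstOffset) {
        Iterator<MutableRecordBatch> batches = records.batches().iterator();
        if (batches.hasNext() && batches.next().magic() > toMagic)
            return records.downConvert(toMagic, firstOffset);   // rewrite batches at the older magic
        return records;                                          // already readable as-is
    }
}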
|
@Test(expected = CorruptRecordException.class) public void iteratorRaisesOnTooLargeRecords() throws IOException { ByteBuffer buffer = ByteBuffer.allocate(1024); MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 0L); builder.append(15L, "a".getBytes(), "1".getBytes()); builder.append(20L, "b".getBytes(), "2".getBytes()); builder.close(); builder = MemoryRecords.builder(buffer, CompressionType.NONE, TimestampType.CREATE_TIME, 2L); builder.append(30L, "c".getBytes(), "3".getBytes()); builder.append(40L, "d".getBytes(), "4".getBytes()); builder.close(); buffer.flip(); ByteBufferLogInputStream logInputStream = new ByteBufferLogInputStream(buffer, 25); assertNotNull(logInputStream.nextBatch()); logInputStream.nextBatch(); }
|
public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); MutableRecordBatch nextBatch(); }
|
ByteBufferLogInputStream implements LogInputStream<MutableRecordBatch> { public MutableRecordBatch nextBatch() throws IOException { int remaining = buffer.remaining(); if (remaining < LOG_OVERHEAD) return null; int recordSize = buffer.getInt(buffer.position() + SIZE_OFFSET); if (recordSize < LegacyRecord.RECORD_OVERHEAD_V0) throw new CorruptRecordException(String.format("Record size is less than the minimum record overhead (%d)", LegacyRecord.RECORD_OVERHEAD_V0)); if (recordSize > maxMessageSize) throw new CorruptRecordException(String.format("Record size exceeds the largest allowable message size (%d).", maxMessageSize)); int batchSize = recordSize + LOG_OVERHEAD; if (remaining < batchSize) return null; byte magic = buffer.get(buffer.position() + MAGIC_OFFSET); ByteBuffer batchSlice = buffer.slice(); batchSlice.limit(batchSize); buffer.position(buffer.position() + batchSize); if (magic < 0 || magic > RecordBatch.CURRENT_MAGIC_VALUE) throw new CorruptRecordException("Invalid magic found in record: " + magic); if (magic > RecordBatch.MAGIC_VALUE_V1) return new DefaultRecordBatch(batchSlice); else return new AbstractLegacyRecordBatch.ByteBufferLegacyRecordBatch(batchSlice); } ByteBufferLogInputStream(ByteBuffer buffer, int maxMessageSize); MutableRecordBatch nextBatch(); }
|
@Test public void testWriteEmptyHeader() { long producerId = 23423L; short producerEpoch = 145; int baseSequence = 983; long baseOffset = 15L; long lastOffset = 37; int partitionLeaderEpoch = 15; long timestamp = System.currentTimeMillis(); for (TimestampType timestampType : Arrays.asList(TimestampType.CREATE_TIME, TimestampType.LOG_APPEND_TIME)) { for (boolean isTransactional : Arrays.asList(true, false)) { for (boolean isControlBatch : Arrays.asList(true, false)) { ByteBuffer buffer = ByteBuffer.allocate(2048); DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.CURRENT_MAGIC_VALUE, producerId, producerEpoch, baseSequence, baseOffset, lastOffset, partitionLeaderEpoch, timestampType, timestamp, isTransactional, isControlBatch); buffer.flip(); DefaultRecordBatch batch = new DefaultRecordBatch(buffer); assertEquals(producerId, batch.producerId()); assertEquals(producerEpoch, batch.producerEpoch()); assertEquals(baseSequence, batch.baseSequence()); assertEquals(baseSequence + ((int) (lastOffset - baseOffset)), batch.lastSequence()); assertEquals(baseOffset, batch.baseOffset()); assertEquals(lastOffset, batch.lastOffset()); assertEquals(partitionLeaderEpoch, batch.partitionLeaderEpoch()); assertEquals(isTransactional, batch.isTransactional()); assertEquals(timestampType, batch.timestampType()); assertEquals(timestamp, batch.maxTimestamp()); assertEquals(isControlBatch, batch.isControlBatch()); } } } }
|
public static void writeEmptyHeader(ByteBuffer buffer, byte magic, long producerId, short producerEpoch, int baseSequence, long baseOffset, long lastOffset, int partitionLeaderEpoch, TimestampType timestampType, long timestamp, boolean isTransactional, boolean isControlRecord) { int offsetDelta = (int) (lastOffset - baseOffset); writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic, CompressionType.NONE, timestampType, timestamp, timestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlRecord, partitionLeaderEpoch, 0); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public static void writeEmptyHeader(ByteBuffer buffer, byte magic, long producerId, short producerEpoch, int baseSequence, long baseOffset, long lastOffset, int partitionLeaderEpoch, TimestampType timestampType, long timestamp, boolean isTransactional, boolean isControlRecord) { int offsetDelta = (int) (lastOffset - baseOffset); writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic, CompressionType.NONE, timestampType, timestamp, timestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlRecord, partitionLeaderEpoch, 0); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public static void writeEmptyHeader(ByteBuffer buffer, byte magic, long producerId, short producerEpoch, int baseSequence, long baseOffset, long lastOffset, int partitionLeaderEpoch, TimestampType timestampType, long timestamp, boolean isTransactional, boolean isControlRecord) { int offsetDelta = (int) (lastOffset - baseOffset); writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic, CompressionType.NONE, timestampType, timestamp, timestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlRecord, partitionLeaderEpoch, 0); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public static void writeEmptyHeader(ByteBuffer buffer, byte magic, long producerId, short producerEpoch, int baseSequence, long baseOffset, long lastOffset, int partitionLeaderEpoch, TimestampType timestampType, long timestamp, boolean isTransactional, boolean isControlRecord) { int offsetDelta = (int) (lastOffset - baseOffset); writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic, CompressionType.NONE, timestampType, timestamp, timestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlRecord, partitionLeaderEpoch, 0); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public static void writeEmptyHeader(ByteBuffer buffer, byte magic, long producerId, short producerEpoch, int baseSequence, long baseOffset, long lastOffset, int partitionLeaderEpoch, TimestampType timestampType, long timestamp, boolean isTransactional, boolean isControlRecord) { int offsetDelta = (int) (lastOffset - baseOffset); writeHeader(buffer, baseOffset, offsetDelta, DefaultRecordBatch.RECORD_BATCH_OVERHEAD, magic, CompressionType.NONE, timestampType, timestamp, timestamp, producerId, producerEpoch, baseSequence, isTransactional, isControlRecord, partitionLeaderEpoch, 0); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
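writeEmptyHeader materializes a batch that carries offset and producer bookkeeping but no records, which is why the test can read sequence numbers straight back out of the header. A trimmed-down sketch of that round trip follows; the buffer size and the concrete values are arbitrary, and only members shown in the listings are used.
// Write a record-less v2 batch header, then read the sequence bookkeeping back:
// lastSequence == baseSequence + (lastOffset - baseOffset).
import java.nio.ByteBuffer;
import org.apache.kafka.common.record.DefaultRecordBatch;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class EmptyHeaderSketch {
    public static void main(String[] args) {
        // RECORD_BATCH_OVERHEAD bytes would be the minimum; 128 leaves headroom.
        ByteBuffer buffer = ByteBuffer.allocate(128);
        DefaultRecordBatch.writeEmptyHeader(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
                1234L,        // producerId
                (short) 5,    // producerEpoch
                100,          // baseSequence
                50L,          // baseOffset
                54L,          // lastOffset
                0,            // partitionLeaderEpoch
                TimestampType.CREATE_TIME,
                System.currentTimeMillis(),
                false,        // isTransactional
                false);       // isControlRecord
        buffer.flip();

        DefaultRecordBatch batch = new DefaultRecordBatch(buffer);
        System.out.println(batch.baseSequence() + " -> " + batch.lastSequence()); // 100 -> 104
    }
}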
|
@Test public void nullSchemaAndMapToJson() { Map<String, Object> input = new HashMap<>(); input.put("key1", 12); input.put("key2", "string"); input.put("key3", true); JsonNode converted = parse(converter.fromConnectData(TOPIC, null, input)); validateEnvelopeNullSchema(converted); assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull()); assertEquals(JsonNodeFactory.instance.objectNode().put("key1", 12).put("key2", "string").put("key3", true), converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
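The test above keeps the schema envelope (with a null schema node) because schemas are enabled; for contrast, the sketch below configures the converter with schemas disabled so only the payload is serialized. The "schemas.enable" key is the standard JsonConverter setting but is stated here as an assumption since it does not appear in the listings; the topic name and map contents are arbitrary.
// Convert a plain Java map with no Connect schema. With schemas disabled the
// serialized JSON is just the payload, with no schema/payload envelope.
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.json.JsonConverter;

public class SchemalessJsonSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        Map<String, Object> value = new HashMap<>();
        value.put("key1", 12);
        value.put("key2", "string");

        byte[] serialized = converter.fromConnectData("my-topic", null, value);
        System.out.println(new String(serialized)); // e.g. {"key1":12,"key2":"string"} (field order may vary)
    }
}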
|
@Test public void testSizeInBytes() { Header[] headers = new Header[] { new RecordHeader("foo", "value".getBytes()), new RecordHeader("bar", (byte[]) null) }; long timestamp = System.currentTimeMillis(); SimpleRecord[] records = new SimpleRecord[] { new SimpleRecord(timestamp, "key".getBytes(), "value".getBytes()), new SimpleRecord(timestamp + 30000, null, "value".getBytes()), new SimpleRecord(timestamp + 60000, "key".getBytes(), null), new SimpleRecord(timestamp + 60000, "key".getBytes(), "value".getBytes(), headers) }; int actualSize = MemoryRecords.withRecords(CompressionType.NONE, records).sizeInBytes(); assertEquals(actualSize, DefaultRecordBatch.sizeInBytes(Arrays.asList(records))); }
|
@Override public int sizeInBytes() { return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public int sizeInBytes() { return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public int sizeInBytes() { return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public int sizeInBytes() { return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public int sizeInBytes() { return LOG_OVERHEAD + buffer.getInt(LENGTH_OFFSET); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
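Because DefaultRecordBatch.sizeInBytes(Iterable<SimpleRecord>) matches what withRecords actually produces for uncompressed data (the property the test asserts), it can be used to size buffers before any records are written. A short sketch of that pattern, with arbitrary record contents and an illustrative class name:
// Pre-compute the serialized size of a batch of SimpleRecords, then confirm it
// matches the MemoryRecords actually built with NONE compression.
import java.util.Arrays;
import java.util.List;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.DefaultRecordBatch;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;

public class SizeEstimateSketch {
    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        List<SimpleRecord> records = Arrays.asList(
                new SimpleRecord(now, "key".getBytes(), "value".getBytes()),
                new SimpleRecord(now + 1, null, "value".getBytes()));

        int estimated = DefaultRecordBatch.sizeInBytes(records);
        MemoryRecords built = MemoryRecords.withRecords(CompressionType.NONE,
                records.toArray(new SimpleRecord[0]));

        System.out.println(estimated + " == " + built.sizeInBytes());
    }
}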
|
@Test(expected = InvalidRecordException.class) public void testInvalidRecordCountTooManyNonCompressedV2() { long now = System.currentTimeMillis(); DefaultRecordBatch batch = recordsWithInvalidRecordCount(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.NONE, 5); for (Record record: batch) { record.isValid(); } }
|
public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
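The four record-count tests in this block all lean on the same focal method: isValid() compares the stored CRC against a checksum recomputed from the buffer, so corruption anywhere in the CRC-covered region (which starts at the attributes field in the v2 format) is detectable. A rough sketch of that behaviour, not part of the dataset; it assumes, as the tests above do, that MemoryRecords.buffer() exposes a view sharing content with the batches:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.*;

public class IsValidSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord(1L, "a".getBytes(), "1".getBytes()));
        System.out.println(records.batches().iterator().next().isValid()); // true

        // Flip one bit near the end of the batch. The stored CRC no longer matches what
        // computeChecksum() derives from the buffer, so a re-read batch reports invalid.
        ByteBuffer buffer = records.buffer();
        int last = buffer.limit() - 1;
        buffer.put(last, (byte) (buffer.get(last) ^ 0x01));
        System.out.println(records.batches().iterator().next().isValid()); // false
    }
}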
@Test(expected = InvalidRecordException.class) public void testInvalidRecordCountTooLittleNonCompressedV2() { long now = System.currentTimeMillis(); DefaultRecordBatch batch = recordsWithInvalidRecordCount(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.NONE, 2); for (Record record: batch) { record.isValid(); } }
|
public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
@Test(expected = InvalidRecordException.class) public void testInvalidRecordCountTooManyCompressedV2() { long now = System.currentTimeMillis(); DefaultRecordBatch batch = recordsWithInvalidRecordCount(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.GZIP, 5); for (Record record: batch) { record.isValid(); } }
|
public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
@Test(expected = InvalidRecordException.class) public void testInvalidRecordCountTooLittleCompressedV2() { long now = System.currentTimeMillis(); DefaultRecordBatch batch = recordsWithInvalidRecordCount(RecordBatch.MAGIC_VALUE_V2, now, CompressionType.GZIP, 2); for (Record record: batch) { record.isValid(); } }
|
public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { public boolean isValid() { return sizeInBytes() >= RECORD_BATCH_OVERHEAD && checksum() == computeChecksum(); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
@Test public void testSetLastOffset() { SimpleRecord[] simpleRecords = new SimpleRecord[] { new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), new SimpleRecord(3L, "c".getBytes(), "3".getBytes()) }; MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, CompressionType.NONE, TimestampType.CREATE_TIME, simpleRecords); long lastOffset = 500L; long firstOffset = lastOffset - simpleRecords.length + 1; DefaultRecordBatch batch = new DefaultRecordBatch(records.buffer()); batch.setLastOffset(lastOffset); assertEquals(lastOffset, batch.lastOffset()); assertEquals(firstOffset, batch.baseOffset()); assertTrue(batch.isValid()); List<MutableRecordBatch> recordBatches = Utils.toList(records.batches().iterator()); assertEquals(1, recordBatches.size()); assertEquals(lastOffset, recordBatches.get(0).lastOffset()); long offset = firstOffset; for (Record record : records.records()) assertEquals(offset++, record.offset()); }
|
@Override public void setLastOffset(long offset) { buffer.putLong(BASE_OFFSET_OFFSET, offset - lastOffsetDelta()); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setLastOffset(long offset) { buffer.putLong(BASE_OFFSET_OFFSET, offset - lastOffsetDelta()); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setLastOffset(long offset) { buffer.putLong(BASE_OFFSET_OFFSET, offset - lastOffsetDelta()); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setLastOffset(long offset) { buffer.putLong(BASE_OFFSET_OFFSET, offset - lastOffsetDelta()); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setLastOffset(long offset) { buffer.putLong(BASE_OFFSET_OFFSET, offset - lastOffsetDelta()); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
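testSetLastOffset works because v2 records store offset deltas rather than absolute offsets: setLastOffset() rewrites only the base-offset header field, and the base offset sits outside the CRC-covered region, so the batch stays valid without recomputing the checksum. A small sketch along the lines of the test above (not part of the dataset):

import org.apache.kafka.common.record.*;

public class SetLastOffsetSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord(1L, "a".getBytes(), "1".getBytes()),
                new SimpleRecord(2L, "b".getBytes(), "2".getBytes()));
        MutableRecordBatch batch = records.batches().iterator().next();

        batch.setLastOffset(100L);
        // Only the base offset moved: lastOffset() = baseOffset() + lastOffsetDelta.
        System.out.println(batch.baseOffset()); // 99 for this two-record batch
        System.out.println(batch.lastOffset()); // 100
        // The base offset is not covered by the CRC, so no checksum rewrite was needed.
        System.out.println(batch.isValid());    // true
    }
}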
@Test public void testSetPartitionLeaderEpoch() { MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, CompressionType.NONE, TimestampType.CREATE_TIME, new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), new SimpleRecord(3L, "c".getBytes(), "3".getBytes())); int leaderEpoch = 500; DefaultRecordBatch batch = new DefaultRecordBatch(records.buffer()); batch.setPartitionLeaderEpoch(leaderEpoch); assertEquals(leaderEpoch, batch.partitionLeaderEpoch()); assertTrue(batch.isValid()); List<MutableRecordBatch> recordBatches = Utils.toList(records.batches().iterator()); assertEquals(1, recordBatches.size()); assertEquals(leaderEpoch, recordBatches.get(0).partitionLeaderEpoch()); }
|
@Override public void setPartitionLeaderEpoch(int epoch) { buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setPartitionLeaderEpoch(int epoch) { buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setPartitionLeaderEpoch(int epoch) { buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setPartitionLeaderEpoch(int epoch) { buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setPartitionLeaderEpoch(int epoch) { buffer.putInt(PARTITION_LEADER_EPOCH_OFFSET, epoch); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
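setPartitionLeaderEpoch() follows the same pattern: the partition leader epoch field was placed before the CRC in the v2 header so a broker can stamp it on append without recomputing the checksum. A brief sketch (not part of the dataset):

import org.apache.kafka.common.record.*;

public class SetPartitionLeaderEpochSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord(1L, "a".getBytes(), "1".getBytes()));
        MutableRecordBatch batch = records.batches().iterator().next();

        batch.setPartitionLeaderEpoch(500);
        System.out.println(batch.partitionLeaderEpoch()); // 500
        // The epoch field is excluded from the checksum, so the batch remains valid.
        System.out.println(batch.isValid());              // true
    }
}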
@Test(expected = IllegalArgumentException.class) public void testSetNoTimestampTypeNotAllowed() { MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L, CompressionType.NONE, TimestampType.CREATE_TIME, new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), new SimpleRecord(3L, "c".getBytes(), "3".getBytes())); DefaultRecordBatch batch = new DefaultRecordBatch(records.buffer()); batch.setMaxTimestamp(TimestampType.NO_TIMESTAMP_TYPE, RecordBatch.NO_TIMESTAMP); }
|
@Override public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) { long currentMaxTimestamp = maxTimestamp(); if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp) return; byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch()); buffer.putShort(ATTRIBUTES_OFFSET, attributes); buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp); long crc = computeChecksum(); ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) { long currentMaxTimestamp = maxTimestamp(); if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp) return; byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch()); buffer.putShort(ATTRIBUTES_OFFSET, attributes); buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp); long crc = computeChecksum(); ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc); } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) { long currentMaxTimestamp = maxTimestamp(); if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp) return; byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch()); buffer.putShort(ATTRIBUTES_OFFSET, attributes); buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp); long crc = computeChecksum(); ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc); } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) { long currentMaxTimestamp = maxTimestamp(); if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp) return; byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch()); buffer.putShort(ATTRIBUTES_OFFSET, attributes); buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp); long crc = computeChecksum(); ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public void setMaxTimestamp(TimestampType timestampType, long maxTimestamp) { long currentMaxTimestamp = maxTimestamp(); if (timestampType() == timestampType && currentMaxTimestamp == maxTimestamp) return; byte attributes = computeAttributes(compressionType(), timestampType, isTransactional(), isControlBatch()); buffer.putShort(ATTRIBUTES_OFFSET, attributes); buffer.putLong(MAX_TIMESTAMP_OFFSET, maxTimestamp); long crc = computeChecksum(); ByteUtils.writeUnsignedInt(buffer, CRC_OFFSET, crc); } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
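Unlike the two setters above, setMaxTimestamp() touches fields inside the CRC-covered region (the attributes and the max-timestamp field), so it recomputes and rewrites the CRC; NO_TIMESTAMP_TYPE is rejected up front, which is what testSetNoTimestampTypeNotAllowed asserts. A sketch of the happy path and the rejected call (not part of the dataset):

import org.apache.kafka.common.record.*;

public class SetMaxTimestampSketch {
    public static void main(String[] args) {
        MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord(1L, "a".getBytes(), "1".getBytes()));
        MutableRecordBatch batch = records.batches().iterator().next();

        long now = System.currentTimeMillis();
        batch.setMaxTimestamp(TimestampType.LOG_APPEND_TIME, now);
        System.out.println(batch.timestampType());       // now reports log-append time
        System.out.println(batch.maxTimestamp() == now); // true
        System.out.println(batch.isValid());             // true: the CRC was rewritten

        try {
            batch.setMaxTimestamp(TimestampType.NO_TIMESTAMP_TYPE, RecordBatch.NO_TIMESTAMP);
        } catch (IllegalArgumentException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}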
@Test public void testReadAndWriteControlBatch() { long producerId = 1L; short producerEpoch = 0; int coordinatorEpoch = 15; ByteBuffer buffer = ByteBuffer.allocate(128); MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, CompressionType.NONE, TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, producerId, producerEpoch, RecordBatch.NO_SEQUENCE, true, true, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.remaining()); EndTransactionMarker marker = new EndTransactionMarker(ControlRecordType.COMMIT, coordinatorEpoch); builder.appendEndTxnMarker(System.currentTimeMillis(), marker); MemoryRecords records = builder.build(); List<MutableRecordBatch> batches = TestUtils.toList(records.batches()); assertEquals(1, batches.size()); MutableRecordBatch batch = batches.get(0); assertTrue(batch.isControlBatch()); List<Record> logRecords = TestUtils.toList(records.records()); assertEquals(1, logRecords.size()); Record commitRecord = logRecords.get(0); assertEquals(marker, EndTransactionMarker.deserialize(commitRecord)); }
|
@Override public boolean isControlBatch() { return (attributes() & CONTROL_FLAG_MASK) > 0; }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public boolean isControlBatch() { return (attributes() & CONTROL_FLAG_MASK) > 0; } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public boolean isControlBatch() { return (attributes() & CONTROL_FLAG_MASK) > 0; } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public boolean isControlBatch() { return (attributes() & CONTROL_FLAG_MASK) > 0; } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { @Override public boolean isControlBatch() { return (attributes() & CONTROL_FLAG_MASK) > 0; } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
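The control flag checked by isControlBatch() is a batch-level attribute fixed when the builder is created, which is why the test above passes isControlBatch = true to the MemoryRecordsBuilder constructor before appending the end-transaction marker. A variant of that recipe writing an ABORT marker instead of COMMIT; a sketch, not part of the dataset, with the constructor argument order copied from the test above:

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.*;

public class ControlBatchSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(128);
        MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP,
                1L /* producerId */, (short) 0 /* producerEpoch */, RecordBatch.NO_SEQUENCE,
                true /* isTransactional */, true /* isControlBatch */,
                RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.remaining());
        EndTransactionMarker abort = new EndTransactionMarker(ControlRecordType.ABORT, 15);
        builder.appendEndTxnMarker(System.currentTimeMillis(), abort);
        MemoryRecords records = builder.build();

        MutableRecordBatch batch = records.batches().iterator().next();
        System.out.println(batch.isControlBatch());   // true: consumers skip these records
        System.out.println(batch.isTransactional());  // true
        Record logRecord = records.records().iterator().next();
        System.out.println(abort.equals(EndTransactionMarker.deserialize(logRecord))); // true
    }
}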
@Test public void testIncrementSequence() { assertEquals(10, DefaultRecordBatch.incrementSequence(5, 5)); assertEquals(0, DefaultRecordBatch.incrementSequence(Integer.MAX_VALUE, 1)); assertEquals(4, DefaultRecordBatch.incrementSequence(Integer.MAX_VALUE - 5, 10)); }
|
static int incrementSequence(int baseSequence, int increment) { if (baseSequence > Integer.MAX_VALUE - increment) return increment - (Integer.MAX_VALUE - baseSequence) - 1; return baseSequence + increment; }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { static int incrementSequence(int baseSequence, int increment) { if (baseSequence > Integer.MAX_VALUE - increment) return increment - (Integer.MAX_VALUE - baseSequence) - 1; return baseSequence + increment; } }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { static int incrementSequence(int baseSequence, int increment) { if (baseSequence > Integer.MAX_VALUE - increment) return increment - (Integer.MAX_VALUE - baseSequence) - 1; return baseSequence + increment; } DefaultRecordBatch(ByteBuffer buffer); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { static int incrementSequence(int baseSequence, int increment) { if (baseSequence > Integer.MAX_VALUE - increment) return increment - (Integer.MAX_VALUE - baseSequence) - 1; return baseSequence + increment; } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); }
|
DefaultRecordBatch extends AbstractRecordBatch implements MutableRecordBatch { static int incrementSequence(int baseSequence, int increment) { if (baseSequence > Integer.MAX_VALUE - increment) return increment - (Integer.MAX_VALUE - baseSequence) - 1; return baseSequence + increment; } DefaultRecordBatch(ByteBuffer buffer); @Override byte magic(); @Override void ensureValid(); long baseTimestamp(); @Override long maxTimestamp(); @Override TimestampType timestampType(); @Override long baseOffset(); @Override long lastOffset(); @Override long producerId(); @Override short producerEpoch(); @Override int baseSequence(); @Override int lastSequence(); @Override CompressionType compressionType(); @Override int sizeInBytes(); @Override Integer countOrNull(); @Override void writeTo(ByteBuffer buffer); @Override void writeTo(ByteBufferOutputStream outputStream); @Override boolean isTransactional(); @Override boolean isControlBatch(); @Override int partitionLeaderEpoch(); @Override Iterator<Record> iterator(); @Override CloseableIterator<Record> streamingIterator(BufferSupplier bufferSupplier); @Override void setLastOffset(long offset); @Override void setMaxTimestamp(TimestampType timestampType, long maxTimestamp); @Override void setPartitionLeaderEpoch(int epoch); @Override long checksum(); boolean isValid(); @Override boolean equals(Object o); @Override int hashCode(); static void writeEmptyHeader(ByteBuffer buffer,
byte magic,
long producerId,
short producerEpoch,
int baseSequence,
long baseOffset,
long lastOffset,
int partitionLeaderEpoch,
TimestampType timestampType,
long timestamp,
boolean isTransactional,
boolean isControlRecord); @Override String toString(); static int sizeInBytes(long baseOffset, Iterable<Record> records); static int sizeInBytes(Iterable<SimpleRecord> records); static final int RECORD_BATCH_OVERHEAD; }
|
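testIncrementSequence pins down the wraparound rule for producer sequence numbers: sequences live in [0, Integer.MAX_VALUE] and wrap back to 0 instead of overflowing into negative values. The helper may not be visible outside its package, so this sketch (not part of the dataset) simply inlines the same arithmetic:

public class IncrementSequenceSketch {
    // Same arithmetic as DefaultRecordBatch.incrementSequence shown above.
    static int incrementSequence(int baseSequence, int increment) {
        if (baseSequence > Integer.MAX_VALUE - increment)
            // Wrap: MAX_VALUE + 1 maps to 0, (MAX_VALUE - 5) + 10 maps to 4.
            return increment - (Integer.MAX_VALUE - baseSequence) - 1;
        return baseSequence + increment;
    }

    public static void main(String[] args) {
        System.out.println(incrementSequence(5, 5));                      // 10
        System.out.println(incrementSequence(Integer.MAX_VALUE, 1));      // 0
        System.out.println(incrementSequence(Integer.MAX_VALUE - 5, 10)); // 4
    }
}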
@Test public void nullSchemaAndMapNonStringKeysToJson() { Map<Object, Object> input = new HashMap<>(); input.put("string", 12); input.put(52, "string"); input.put(false, true); JsonNode converted = parse(converter.fromConnectData(TOPIC, null, input)); validateEnvelopeNullSchema(converted); assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull()); assertTrue(converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME).isArray()); ArrayNode payload = (ArrayNode) converted.get(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME); assertEquals(3, payload.size()); Set<JsonNode> payloadEntries = new HashSet<>(); for (JsonNode elem : payload) payloadEntries.add(elem); assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add("string").add(12), JsonNodeFactory.instance.arrayNode().add(52).add("string"), JsonNodeFactory.instance.arrayNode().add(false).add(true))), payloadEntries ); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
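nullSchemaAndMapNonStringKeysToJson captures a corner of the schemaless path: a JSON object can only have string keys, so when a map contains non-string keys the converter emits the payload as an array of [key, value] pairs instead of an object. A rough standalone sketch (not part of the dataset; the default configuration, which keeps the schema/payload envelope, is assumed):

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.connect.json.JsonConverter;

public class MapWithNonStringKeysSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.<String, Object>emptyMap(), false); // value converter, defaults

        Map<Object, Object> value = new HashMap<>();
        value.put("string", 12);
        value.put(52, "string"); // a non-string key forces the array-of-pairs encoding

        byte[] json = converter.fromConnectData("my-topic", null, value);
        // Expect something like {"schema":null,"payload":[[52,"string"],["string",12]]}
        // (map iteration order is not guaranteed).
        System.out.println(new String(json, StandardCharsets.UTF_8));
    }
}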
@Test public void testIterationDoesntChangePosition() throws IOException { long position = fileRecords.channel().position(); Iterator<Record> records = fileRecords.records().iterator(); for (byte[] value : values) { assertTrue(records.hasNext()); assertEquals(records.next().value(), ByteBuffer.wrap(value)); } assertEquals(position, fileRecords.channel().position()); }
|
public FileChannel channel() { return channel; }
|
FileRecords extends AbstractRecords implements Closeable { public FileChannel channel() { return channel; } }
|
FileRecords extends AbstractRecords implements Closeable { public FileChannel channel() { return channel; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); }
|
FileRecords extends AbstractRecords implements Closeable { public FileChannel channel() { return channel; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
FileRecords extends AbstractRecords implements Closeable { public FileChannel channel() { return channel; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
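testIterationDoesntChangePosition checks an important property of FileRecords: iterating records() uses positional reads against the channel, so consuming the log does not disturb the FileChannel position used for appends. A sketch against a throwaway file (not part of the dataset):

import java.io.File;
import org.apache.kafka.common.record.*;

public class IterationPositionSketch {
    public static void main(String[] args) throws Exception {
        File file = File.createTempFile("file-records-sketch", ".log");
        file.deleteOnExit();
        FileRecords fileRecords = FileRecords.open(file);
        fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord("k".getBytes(), "v".getBytes())));
        fileRecords.flush();

        long before = fileRecords.channel().position();
        for (Record record : fileRecords.records())
            record.isValid(); // touch every record; reads are positional, not sequential
        System.out.println(before == fileRecords.channel().position()); // expected: true
        fileRecords.close();
    }
}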
@Test public void testRead() throws IOException { FileRecords read = fileRecords.read(0, fileRecords.sizeInBytes()); TestUtils.checkEquals(fileRecords.batches(), read.batches()); List<RecordBatch> items = batches(read); RecordBatch second = items.get(1); read = fileRecords.read(second.sizeInBytes(), fileRecords.sizeInBytes()); assertEquals("Try a read starting from the second message", items.subList(1, 3), batches(read)); read = fileRecords.read(second.sizeInBytes(), second.sizeInBytes()); assertEquals("Try a read of a single message starting from the second message", Collections.singletonList(second), batches(read)); }
|
public FileRecords read(int position, int size) throws IOException { if (position < 0) throw new IllegalArgumentException("Invalid position: " + position); if (size < 0) throw new IllegalArgumentException("Invalid size: " + size); final int end; if (this.start + position + size < 0) end = sizeInBytes(); else end = Math.min(this.start + position + size, sizeInBytes()); return new FileRecords(file, channel, this.start + position, end, true); }
|
FileRecords extends AbstractRecords implements Closeable { public FileRecords read(int position, int size) throws IOException { if (position < 0) throw new IllegalArgumentException("Invalid position: " + position); if (size < 0) throw new IllegalArgumentException("Invalid size: " + size); final int end; if (this.start + position + size < 0) end = sizeInBytes(); else end = Math.min(this.start + position + size, sizeInBytes()); return new FileRecords(file, channel, this.start + position, end, true); } }
|
FileRecords extends AbstractRecords implements Closeable { public FileRecords read(int position, int size) throws IOException { if (position < 0) throw new IllegalArgumentException("Invalid position: " + position); if (size < 0) throw new IllegalArgumentException("Invalid size: " + size); final int end; if (this.start + position + size < 0) end = sizeInBytes(); else end = Math.min(this.start + position + size, sizeInBytes()); return new FileRecords(file, channel, this.start + position, end, true); } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); }
|
FileRecords extends AbstractRecords implements Closeable { public FileRecords read(int position, int size) throws IOException { if (position < 0) throw new IllegalArgumentException("Invalid position: " + position); if (size < 0) throw new IllegalArgumentException("Invalid size: " + size); final int end; if (this.start + position + size < 0) end = sizeInBytes(); else end = Math.min(this.start + position + size, sizeInBytes()); return new FileRecords(file, channel, this.start + position, end, true); } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
FileRecords extends AbstractRecords implements Closeable { public FileRecords read(int position, int size) throws IOException { if (position < 0) throw new IllegalArgumentException("Invalid position: " + position); if (size < 0) throw new IllegalArgumentException("Invalid size: " + size); final int end; if (this.start + position + size < 0) end = sizeInBytes(); else end = Math.min(this.start + position + size, sizeInBytes()); return new FileRecords(file, channel, this.start + position, end, true); } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
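read(position, size) does not copy bytes: it returns a FileRecords slice over the same channel whose start is shifted by position and whose end is clamped to sizeInBytes(); the start + position + size < 0 branch guards against int overflow when a caller passes a huge size. A sketch of the clamping behaviour (not part of the dataset):

import java.io.File;
import org.apache.kafka.common.record.*;

public class FileRecordsReadSketch {
    public static void main(String[] args) throws Exception {
        File file = File.createTempFile("file-records-read-sketch", ".log");
        file.deleteOnExit();
        FileRecords fileRecords = FileRecords.open(file);
        fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 0L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord("a".getBytes(), "1".getBytes())));
        fileRecords.append(MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, 1L,
                CompressionType.NONE, TimestampType.CREATE_TIME,
                new SimpleRecord("b".getBytes(), "2".getBytes())));
        fileRecords.flush();

        int firstBatchSize = fileRecords.batches().iterator().next().sizeInBytes();
        // Integer.MAX_VALUE overflows start + position + size, so the slice end is
        // clamped to sizeInBytes() and the view covers everything after the first batch.
        FileRecords tail = fileRecords.read(firstBatchSize, Integer.MAX_VALUE);
        System.out.println(tail.sizeInBytes() == fileRecords.sizeInBytes() - firstBatchSize); // true
        fileRecords.close();
    }
}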
@Test public void testTruncateNotCalledIfSizeIsSameAsTargetSize() throws IOException { FileChannel channelMock = EasyMock.createMock(FileChannel.class); EasyMock.expect(channelMock.size()).andReturn(42L).atLeastOnce(); EasyMock.expect(channelMock.position(42L)).andReturn(null); EasyMock.replay(channelMock); FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); fileRecords.truncateTo(42); EasyMock.verify(channelMock); }
|
public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
@Test public void testTruncateNotCalledIfSizeIsBiggerThanTargetSize() throws IOException { FileChannel channelMock = EasyMock.createMock(FileChannel.class); EasyMock.expect(channelMock.size()).andReturn(42L).atLeastOnce(); EasyMock.expect(channelMock.position(42L)).andReturn(null); EasyMock.replay(channelMock); FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); try { fileRecords.truncateTo(43); fail("Should throw KafkaException"); } catch (KafkaException e) { } EasyMock.verify(channelMock); }
|
public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
@Test public void testTruncateIfSizeIsDifferentToTargetSize() throws IOException { FileChannel channelMock = EasyMock.createMock(FileChannel.class); EasyMock.expect(channelMock.size()).andReturn(42L).atLeastOnce(); EasyMock.expect(channelMock.position(42L)).andReturn(null).once(); EasyMock.expect(channelMock.truncate(23L)).andReturn(null).once(); EasyMock.replay(channelMock); FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); fileRecords.truncateTo(23); EasyMock.verify(channelMock); }
|
public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
|
FileRecords extends AbstractRecords implements Closeable { public int truncateTo(int targetSize) throws IOException { int originalSize = sizeInBytes(); if (targetSize > originalSize || targetSize < 0) throw new KafkaException("Attempt to truncate log segment to " + targetSize + " bytes failed, " + " size of this log segment is " + originalSize + " bytes."); if (targetSize < (int) channel.size()) { channel.truncate(targetSize); size.set(targetSize); } return originalSize - targetSize; } FileRecords(File file,
FileChannel channel,
int start,
int end,
boolean isSlice); @Override int sizeInBytes(); File file(); FileChannel channel(); ByteBuffer readInto(ByteBuffer buffer, int position); FileRecords read(int position, int size); int append(MemoryRecords records); void flush(); void close(); boolean delete(); void trim(); void setFile(File file); void renameTo(File f); int truncateTo(int targetSize); @Override Records downConvert(byte toMagic, long firstOffset); @Override long writeTo(GatheringByteChannel destChannel, long offset, int length); LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition); TimestampAndOffset searchForTimestamp(long targetTimestamp, int startingPosition, long startingOffset); TimestampAndOffset largestTimestampAfter(int startingPosition); @Override Iterable<FileChannelRecordBatch> batches(); static FileRecords open(File file,
boolean mutable,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file,
boolean fileAlreadyExists,
int initFileSize,
boolean preallocate); static FileRecords open(File file, boolean mutable); static FileRecords open(File file); }
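The three truncateTo tests above cover the method's boundary cases with a mocked channel. Below is a minimal sketch of the same behaviour against a real temp file, assuming the FileRecords.open and append signatures listed in this row; the class name TruncateToSketch and the sample record are illustrative.

import java.io.File;
import java.io.IOException;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.FileRecords;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.SimpleRecord;

public class TruncateToSketch {
    public static void main(String[] args) throws IOException {
        File file = File.createTempFile("kafka-truncate", ".log");
        file.deleteOnExit();
        try (FileRecords records = FileRecords.open(file)) {
            records.append(MemoryRecords.withRecords(CompressionType.NONE, new SimpleRecord("value".getBytes())));
            int size = records.sizeInBytes();
            // Truncating to the current size is a no-op: the channel is untouched and 0 bytes are removed.
            System.out.println(records.truncateTo(size));   // 0
            // Truncating to a smaller size shrinks the channel and returns the number of bytes removed.
            System.out.println(records.truncateTo(0));      // prints the original size
            // Truncating past the current size is rejected with a KafkaException.
            try {
                records.truncateTo(size + 1);
            } catch (KafkaException e) {
                System.out.println("rejected: " + e.getMessage());
            }
        }
    }
}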
|
@Test(expected = IllegalArgumentException.class) public void testWriteTransactionalWithInvalidPID() { ByteBuffer buffer = ByteBuffer.allocate(128); buffer.position(bufferOffset); long pid = RecordBatch.NO_PRODUCER_ID; short epoch = 15; int sequence = 2342; MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, 0L, 0L, pid, epoch, sequence, true, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity()); builder.close(); }
|
public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
@Test(expected = IllegalArgumentException.class) public void testWriteIdempotentWithInvalidEpoch() { ByteBuffer buffer = ByteBuffer.allocate(128); buffer.position(bufferOffset); long pid = 9809; short epoch = RecordBatch.NO_PRODUCER_EPOCH; int sequence = 2342; MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, 0L, 0L, pid, epoch, sequence, true, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity()); builder.close(); }
|
public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
@Test(expected = IllegalArgumentException.class) public void testWriteIdempotentWithInvalidBaseSequence() { ByteBuffer buffer = ByteBuffer.allocate(128); buffer.position(bufferOffset); long pid = 9809; short epoch = 15; int sequence = RecordBatch.NO_SEQUENCE; MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, 0L, 0L, pid, epoch, sequence, true, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity()); builder.close(); }
|
public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
MemoryRecordsBuilder { public void close() { if (aborted) throw new IllegalStateException("Cannot close MemoryRecordsBuilder as it has already been aborted"); if (builtRecords != null) return; validateProducerState(); closeForRecordAppends(); if (numRecords == 0L) { buffer().position(initialPosition); builtRecords = MemoryRecords.EMPTY; } else { if (magic > RecordBatch.MAGIC_VALUE_V1) this.actualCompressionRatio = (float) writeDefaultBatchHeader() / this.writtenUncompressed; else if (compressionType != CompressionType.NONE) this.actualCompressionRatio = (float) writeLegacyCompressedWrapperHeader() / this.writtenUncompressed; ByteBuffer buffer = buffer().duplicate(); buffer.flip(); buffer.position(initialPosition); builtRecords = MemoryRecords.readableRecords(buffer.slice()); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
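The three builder tests above make close() fail because one element of the producer triple is unset. A minimal counterpart sketch with a complete (producerId, producerEpoch, baseSequence) triple, using the ByteBuffer constructor shown in those tests; the literal values and class name are illustrative, and CompressionType.NONE stands in for the test class's compressionType field.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class TransactionalBuilderSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        long pid = 9809L;      // a real producer id (not RecordBatch.NO_PRODUCER_ID)
        short epoch = 15;      // a real producer epoch (not RecordBatch.NO_PRODUCER_EPOCH)
        int sequence = 2342;   // a real base sequence (not RecordBatch.NO_SEQUENCE)
        MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L, 0L, pid, epoch, sequence,
                true /* isTransactional */, false /* isControlBatch */,
                RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity());
        builder.append(0L, "key".getBytes(), "value".getBytes());
        builder.close();                         // validateProducerState() passes: the producer triple is complete
        MemoryRecords records = builder.build(); // returns the records built by the (already completed) close()
        System.out.println("built " + records.sizeInBytes() + " bytes");
    }
}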
|
@Test(expected = IllegalArgumentException.class) public void testWriteEndTxnMarkerNonTransactionalBatch() { ByteBuffer buffer = ByteBuffer.allocate(128); buffer.position(bufferOffset); long pid = 9809; short epoch = 15; int sequence = RecordBatch.NO_SEQUENCE; MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, 0L, 0L, pid, epoch, sequence, false, true, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity()); builder.appendEndTxnMarker(RecordBatch.NO_TIMESTAMP, new EndTransactionMarker(ControlRecordType.ABORT, 0)); }
|
public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); }
|
MemoryRecordsBuilder { public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); } }
|
MemoryRecordsBuilder { public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); }
|
MemoryRecordsBuilder { public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
MemoryRecordsBuilder { public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
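The surrounding tests show appendEndTxnMarker rejecting a non-transactional or non-control batch. Below is a minimal sketch of the accepting configuration: a transactional control batch with a real producerId. It assumes a control batch may carry RecordBatch.NO_SEQUENCE as its base sequence (an assumption not visible in this row); the values and class name are illustrative.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.ControlRecordType;
import org.apache.kafka.common.record.EndTransactionMarker;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class EndTxnMarkerSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(256);
        MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE,
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L, 0L,
                9809L /* producerId */, (short) 15 /* producerEpoch */,
                RecordBatch.NO_SEQUENCE /* assumed valid for control batches */,
                true /* isTransactional */, true /* isControlBatch */,
                RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity());
        // Both preconditions checked by appendEndTxnMarker hold: a real producerId and a transactional batch.
        builder.appendEndTxnMarker(System.currentTimeMillis(),
                new EndTransactionMarker(ControlRecordType.ABORT, 0 /* coordinatorEpoch */));
        builder.close();
    }
}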
|
@Test(expected = DataException.class) public void mismatchSchemaJson() { converter.fromConnectData(TOPIC, Schema.FLOAT64_SCHEMA, true); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
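A minimal sketch contrasting the mismatch test above with a value that does match the declared schema; the empty configure() map, the topic name, and the class name SchemaMatchSketch are illustrative assumptions.

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.errors.DataException;
import org.apache.kafka.connect.json.JsonConverter;

public class SchemaMatchSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.<String, Object>emptyMap(), false /* isKey */);
        // A Double matches FLOAT64 and serializes cleanly...
        byte[] ok = converter.fromConnectData("topic", Schema.FLOAT64_SCHEMA, 64.2);
        System.out.println(new String(ok, StandardCharsets.UTF_8));
        // ...while a Boolean against the same schema is rejected, as in the test above.
        try {
            converter.fromConnectData("topic", Schema.FLOAT64_SCHEMA, true);
        } catch (DataException e) {
            System.out.println("mismatch rejected: " + e.getMessage());
        }
    }
}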
|
@Test(expected = IllegalArgumentException.class) public void testWriteEndTxnMarkerNonControlBatch() { ByteBuffer buffer = ByteBuffer.allocate(128); buffer.position(bufferOffset); long pid = 9809; short epoch = 15; int sequence = RecordBatch.NO_SEQUENCE; MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compressionType, TimestampType.CREATE_TIME, 0L, 0L, pid, epoch, sequence, true, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity()); builder.appendEndTxnMarker(RecordBatch.NO_TIMESTAMP, new EndTransactionMarker(ControlRecordType.ABORT, 0)); }
|
public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); }
|
MemoryRecordsBuilder { public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); } }
|
MemoryRecordsBuilder { public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); }
|
MemoryRecordsBuilder { public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
MemoryRecordsBuilder { public Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker) { if (producerId == RecordBatch.NO_PRODUCER_ID) throw new IllegalArgumentException("End transaction marker requires a valid producerId"); if (!isTransactional) throw new IllegalArgumentException("End transaction marker depends on batch transactional flag being enabled"); ByteBuffer value = marker.serializeValue(); return appendControlRecord(timestamp, marker.controlType(), value); } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
@Test(expected = IllegalArgumentException.class) public void testAppendAtInvalidOffset() { ByteBuffer buffer = ByteBuffer.allocate(1024); buffer.position(bufferOffset); long logAppendTime = System.currentTimeMillis(); MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.MAGIC_VALUE_V1, compressionType, TimestampType.CREATE_TIME, 0L, logAppendTime, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, false, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity()); builder.appendWithOffset(0L, System.currentTimeMillis(), "a".getBytes(), null); builder.appendWithOffset(0L, System.currentTimeMillis(), "b".getBytes(), null); }
|
private Long appendWithOffset(long offset, boolean isControlRecord, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { try { if (isControlRecord != isControlBatch) throw new IllegalArgumentException("Control records can only be appended to control batches"); if (lastOffset != null && offset <= lastOffset) throw new IllegalArgumentException(String.format("Illegal offset %s following previous offset %s " + "(Offsets must increase monotonically).", offset, lastOffset)); if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP) throw new IllegalArgumentException("Invalid negative timestamp " + timestamp); if (magic < RecordBatch.MAGIC_VALUE_V2 && headers != null && headers.length > 0) throw new IllegalArgumentException("Magic v" + magic + " does not support record headers"); if (baseTimestamp == null) baseTimestamp = timestamp; if (magic > RecordBatch.MAGIC_VALUE_V1) { appendDefaultRecord(offset, timestamp, key, value, headers); return null; } else { return appendLegacyRecord(offset, timestamp, key, value); } } catch (IOException e) { throw new KafkaException("I/O exception when writing to the append stream, closing", e); } }
|
MemoryRecordsBuilder { private Long appendWithOffset(long offset, boolean isControlRecord, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { try { if (isControlRecord != isControlBatch) throw new IllegalArgumentException("Control records can only be appended to control batches"); if (lastOffset != null && offset <= lastOffset) throw new IllegalArgumentException(String.format("Illegal offset %s following previous offset %s " + "(Offsets must increase monotonically).", offset, lastOffset)); if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP) throw new IllegalArgumentException("Invalid negative timestamp " + timestamp); if (magic < RecordBatch.MAGIC_VALUE_V2 && headers != null && headers.length > 0) throw new IllegalArgumentException("Magic v" + magic + " does not support record headers"); if (baseTimestamp == null) baseTimestamp = timestamp; if (magic > RecordBatch.MAGIC_VALUE_V1) { appendDefaultRecord(offset, timestamp, key, value, headers); return null; } else { return appendLegacyRecord(offset, timestamp, key, value); } } catch (IOException e) { throw new KafkaException("I/O exception when writing to the append stream, closing", e); } } }
|
MemoryRecordsBuilder { private Long appendWithOffset(long offset, boolean isControlRecord, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { try { if (isControlRecord != isControlBatch) throw new IllegalArgumentException("Control records can only be appended to control batches"); if (lastOffset != null && offset <= lastOffset) throw new IllegalArgumentException(String.format("Illegal offset %s following previous offset %s " + "(Offsets must increase monotonically).", offset, lastOffset)); if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP) throw new IllegalArgumentException("Invalid negative timestamp " + timestamp); if (magic < RecordBatch.MAGIC_VALUE_V2 && headers != null && headers.length > 0) throw new IllegalArgumentException("Magic v" + magic + " does not support record headers"); if (baseTimestamp == null) baseTimestamp = timestamp; if (magic > RecordBatch.MAGIC_VALUE_V1) { appendDefaultRecord(offset, timestamp, key, value, headers); return null; } else { return appendLegacyRecord(offset, timestamp, key, value); } } catch (IOException e) { throw new KafkaException("I/O exception when writing to the append stream, closing", e); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); }
|
MemoryRecordsBuilder { private Long appendWithOffset(long offset, boolean isControlRecord, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { try { if (isControlRecord != isControlBatch) throw new IllegalArgumentException("Control records can only be appended to control batches"); if (lastOffset != null && offset <= lastOffset) throw new IllegalArgumentException(String.format("Illegal offset %s following previous offset %s " + "(Offsets must increase monotonically).", offset, lastOffset)); if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP) throw new IllegalArgumentException("Invalid negative timestamp " + timestamp); if (magic < RecordBatch.MAGIC_VALUE_V2 && headers != null && headers.length > 0) throw new IllegalArgumentException("Magic v" + magic + " does not support record headers"); if (baseTimestamp == null) baseTimestamp = timestamp; if (magic > RecordBatch.MAGIC_VALUE_V1) { appendDefaultRecord(offset, timestamp, key, value, headers); return null; } else { return appendLegacyRecord(offset, timestamp, key, value); } } catch (IOException e) { throw new KafkaException("I/O exception when writing to the append stream, closing", e); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
|
MemoryRecordsBuilder { private Long appendWithOffset(long offset, boolean isControlRecord, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers) { try { if (isControlRecord != isControlBatch) throw new IllegalArgumentException("Control records can only be appended to control batches"); if (lastOffset != null && offset <= lastOffset) throw new IllegalArgumentException(String.format("Illegal offset %s following previous offset %s " + "(Offsets must increase monotonically).", offset, lastOffset)); if (timestamp < 0 && timestamp != RecordBatch.NO_TIMESTAMP) throw new IllegalArgumentException("Invalid negative timestamp " + timestamp); if (magic < RecordBatch.MAGIC_VALUE_V2 && headers != null && headers.length > 0) throw new IllegalArgumentException("Magic v" + magic + " does not support record headers"); if (baseTimestamp == null) baseTimestamp = timestamp; if (magic > RecordBatch.MAGIC_VALUE_V1) { appendDefaultRecord(offset, timestamp, key, value, headers); return null; } else { return appendLegacyRecord(offset, timestamp, key, value); } } catch (IOException e) { throw new KafkaException("I/O exception when writing to the append stream, closing", e); } } MemoryRecordsBuilder(ByteBufferOutputStream bufferStream,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); MemoryRecordsBuilder(ByteBuffer buffer,
byte magic,
CompressionType compressionType,
TimestampType timestampType,
long baseOffset,
long logAppendTime,
long producerId,
short producerEpoch,
int baseSequence,
boolean isTransactional,
boolean isControlBatch,
int partitionLeaderEpoch,
int writeLimit); ByteBuffer buffer(); int initialCapacity(); double compressionRatio(); CompressionType compressionType(); boolean isControlBatch(); boolean isTransactional(); MemoryRecords build(); RecordsInfo info(); void setProducerState(long producerId, short producerEpoch, int baseSequence, boolean isTransactional); void overrideLastOffset(long lastOffset); void closeForRecordAppends(); void abort(); void close(); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value, Header[] headers); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long appendWithOffset(long offset, long timestamp, byte[] key, byte[] value); Long appendWithOffset(long offset, long timestamp, ByteBuffer key, ByteBuffer value); Long appendWithOffset(long offset, SimpleRecord record); Long append(long timestamp, ByteBuffer key, ByteBuffer value); Long append(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); Long append(long timestamp, byte[] key, byte[] value); Long append(long timestamp, byte[] key, byte[] value, Header[] headers); Long append(SimpleRecord record); Long appendEndTxnMarker(long timestamp, EndTransactionMarker marker); void appendUncheckedWithOffset(long offset, LegacyRecord record); void append(Record record); void appendWithOffset(long offset, Record record); void appendWithOffset(long offset, LegacyRecord record); void append(LegacyRecord record); void setEstimatedCompressionRatio(float estimatedCompressionRatio); boolean hasRoomFor(long timestamp, byte[] key, byte[] value, Header[] headers); boolean hasRoomFor(long timestamp, ByteBuffer key, ByteBuffer value, Header[] headers); boolean isClosed(); boolean isFull(); int sizeInBytes(); byte magic(); long producerId(); short producerEpoch(); }
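appendWithOffset enforces strictly increasing offsets, which is what the expected IllegalArgumentException in the test above is about. Below is a small sketch of the legal pattern, mirroring the test's constructor arguments but advancing the offset on each append; CompressionType.NONE and the key bytes are illustrative.

import java.nio.ByteBuffer;

import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

// Offsets handed to appendWithOffset must increase monotonically within the builder.
public class MonotonicOffsetsSketch {
    public static void main(String[] args) {
        ByteBuffer buffer = ByteBuffer.allocate(1024);
        MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, RecordBatch.MAGIC_VALUE_V1,
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L, System.currentTimeMillis(),
                RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE,
                false, false, RecordBatch.NO_PARTITION_LEADER_EPOCH, buffer.capacity());

        builder.appendWithOffset(0L, System.currentTimeMillis(), "a".getBytes(), null);
        builder.appendWithOffset(1L, System.currentTimeMillis(), "b".getBytes(), null); // 1L, not 0L again

        for (Record record : builder.build().records())
            System.out.println("offset " + record.offset());
    }
}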
|
@Test public void shouldBeFlaggedAsTransactionalWhenTransactionalRecords() throws Exception { final MemoryRecords memoryRecords = MemoryRecords.withTransactionalRecords(0, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord); final ProduceRequest request = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) -1, 10, Collections.singletonMap( new TopicPartition("topic", 1), memoryRecords)).build(); assertTrue(request.isTransactional()); }
|
public boolean isTransactional() { return transactional; }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }
|
@Test public void shouldNotBeFlaggedAsTransactionalWhenNoRecords() throws Exception { final ProduceRequest request = createNonIdempotentNonTransactionalRecords(); assertFalse(request.isTransactional()); }
|
public boolean isTransactional() { return transactional; }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }
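The two rows above both resolve to ProduceRequest.isTransactional(), which simply reports a flag the request derives from its record batches at construction time. Here is a hedged sketch of the positive case, reusing the builder and withTransactionalRecords call from the test; the topic name and record contents are placeholders.

import java.util.Collections;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.requests.ProduceRequest;

// The transactional flag is read off the batch headers, not passed to the request explicitly.
public class TransactionalProduceRequestSketch {
    public static void main(String[] args) {
        SimpleRecord record = new SimpleRecord(System.currentTimeMillis(), "key".getBytes(), "value".getBytes());
        MemoryRecords records = MemoryRecords.withTransactionalRecords(
                0, CompressionType.NONE, 1L, (short) 1, 1, 1, record);

        ProduceRequest request = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) -1, 10,
                Collections.singletonMap(new TopicPartition("topic", 1), records)).build();

        System.out.println("transactional? " + request.isTransactional());
    }
}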
|
@Test public void shouldNotBeFlaggedAsIdempotentWhenRecordsNotIdempotent() throws Exception { final ProduceRequest request = createNonIdempotentNonTransactionalRecords(); assertFalse(request.isTransactional()); }
|
public boolean isTransactional() { return transactional; }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }
|
ProduceRequest extends AbstractRequest { public boolean isTransactional() { return transactional; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }
|
@Test public void shouldBeFlaggedAsIdempotentWhenIdempotentRecords() throws Exception { final MemoryRecords memoryRecords = MemoryRecords.withIdempotentRecords(1, CompressionType.NONE, 1L, (short) 1, 1, 1, simpleRecord); final ProduceRequest request = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) -1, 10, Collections.singletonMap( new TopicPartition("topic", 1), memoryRecords)).build(); assertTrue(request.isIdempotent()); }
|
public boolean isIdempotent() { return idempotent; }
|
ProduceRequest extends AbstractRequest { public boolean isIdempotent() { return idempotent; } }
|
ProduceRequest extends AbstractRequest { public boolean isIdempotent() { return idempotent; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); }
|
ProduceRequest extends AbstractRequest { public boolean isIdempotent() { return idempotent; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }
|
ProduceRequest extends AbstractRequest { public boolean isIdempotent() { return idempotent; } private ProduceRequest(short version, short acks, int timeout, Map<TopicPartition, MemoryRecords> partitionRecords, String transactionalId); ProduceRequest(Struct struct, short version); @Override Struct toStruct(); @Override String toString(boolean verbose); @Override ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e); short acks(); int timeout(); String transactionalId(); boolean isTransactional(); boolean isIdempotent(); Map<TopicPartition, MemoryRecords> partitionRecordsOrFail(); void clearPartitionRecords(); static ProduceRequest parse(ByteBuffer buffer, short version); static byte requiredMagicForVersion(short produceRequestVersion); }
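isIdempotent() is the companion flag: it is set whenever the batches carry a producer id, even when the transactional bit is off. The sketch below contrasts the two flags for idempotent-but-not-transactional records, reusing the withIdempotentRecords call from the test above; names and values are illustrative.

import java.util.Collections;

import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.SimpleRecord;
import org.apache.kafka.common.requests.ProduceRequest;

// Idempotent batches carry a producerId/epoch/sequence but no transactional marker,
// so only one of the two flags should come back true here.
public class IdempotentProduceRequestSketch {
    public static void main(String[] args) {
        SimpleRecord record = new SimpleRecord(System.currentTimeMillis(), "key".getBytes(), "value".getBytes());
        MemoryRecords records = MemoryRecords.withIdempotentRecords(
                1, CompressionType.NONE, 1L, (short) 1, 1, 1, record);

        ProduceRequest request = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) -1, 10,
                Collections.singletonMap(new TopicPartition("topic", 1), records)).build();

        System.out.println("idempotent?    " + request.isIdempotent());
        System.out.println("transactional? " + request.isTransactional());
    }
}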
|
@Test public void testGetProducerConfigs() throws Exception { final String clientId = "client"; final Map<String, Object> returnedProps = streamsConfig.getProducerConfigs(clientId); assertEquals(returnedProps.get(ProducerConfig.CLIENT_ID_CONFIG), clientId + "-producer"); assertEquals(returnedProps.get(ProducerConfig.LINGER_MS_CONFIG), "100"); assertNull(returnedProps.get("DUMMY")); }
|
public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
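getProducerConfigs() layers user overrides (supplied under the "producer." prefix) over the Streams defaults, derives the "-producer" client id, and, under exactly-once, rejects overrides of idempotence and max in-flight requests. A hedged sketch of the prefixed-override path follows; the application id, bootstrap address, and linger value are placeholders, and producerPrefix() is the helper declared in the class listing above.

import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.streams.StreamsConfig;

// A "producer."-prefixed property is stripped and handed to the embedded producer,
// replacing the Streams default for that key (linger.ms defaults to "100" per the test above).
public class ProducerConfigsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-application");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.producerPrefix(ProducerConfig.LINGER_MS_CONFIG), "5");

        Map<String, Object> producerConfigs = new StreamsConfig(props).getProducerConfigs("client");
        System.out.println(producerConfigs.get(ProducerConfig.LINGER_MS_CONFIG));  // "5" instead of the default
        System.out.println(producerConfigs.get(ProducerConfig.CLIENT_ID_CONFIG));  // "client-producer"
    }
}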
|
@Test public void testGetConsumerConfigs() throws Exception { final String groupId = "example-application"; final String clientId = "client"; final Map<String, Object> returnedProps = streamsConfig.getConsumerConfigs(null, groupId, clientId); assertEquals(returnedProps.get(ConsumerConfig.CLIENT_ID_CONFIG), clientId + "-consumer"); assertEquals(returnedProps.get(ConsumerConfig.GROUP_ID_CONFIG), groupId); assertEquals(returnedProps.get(ConsumerConfig.MAX_POLL_RECORDS_CONFIG), "1000"); assertNull(returnedProps.get("DUMMY")); }
|
public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
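getConsumerConfigs() starts from the shared consumer settings and then pins the group id, the "-consumer" client id, the Streams partition assignor, and a handful of internal keys. Consumer overrides go through consumerPrefix(), mirroring the producer case; as in the test, null is passed for the StreamThread. The sketch below makes the same placeholder assumptions as the producer example.

import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.streams.StreamsConfig;

// "consumer."-prefixed overrides are stripped before being handed to the embedded consumer.
public class ConsumerConfigsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-application");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        props.put(StreamsConfig.consumerPrefix(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG), "earliest");

        Map<String, Object> consumerConfigs =
                new StreamsConfig(props).getConsumerConfigs(null, "example-application", "client");
        System.out.println(consumerConfigs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)); // "earliest"
        System.out.println(consumerConfigs.get(ConsumerConfig.GROUP_ID_CONFIG));          // "example-application"
        System.out.println(consumerConfigs.get(ConsumerConfig.CLIENT_ID_CONFIG));         // "client-consumer"
    }
}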
|
@Test public void testGetRestoreConsumerConfigs() throws Exception { final String clientId = "client"; final Map<String, Object> returnedProps = streamsConfig.getRestoreConsumerConfigs(clientId); assertEquals(returnedProps.get(ConsumerConfig.CLIENT_ID_CONFIG), clientId + "-restore-consumer"); assertNull(returnedProps.get(ConsumerConfig.GROUP_ID_CONFIG)); assertNull(returnedProps.get("DUMMY")); }
|
public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
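The restore consumer reuses the common consumer settings but deliberately drops the group id (state restoration reads changelog topics directly rather than joining the consumer group) and gets its own "-restore-consumer" client id suffix. A short sketch under the same placeholder assumptions:

import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.streams.StreamsConfig;

// The restore consumer is group-less: group.id is removed before the map is returned.
public class RestoreConsumerConfigsSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-application");
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");

        Map<String, Object> restoreConfigs = new StreamsConfig(props).getRestoreConsumerConfigs("client");
        System.out.println(restoreConfigs.containsKey(ConsumerConfig.GROUP_ID_CONFIG)); // false
        System.out.println(restoreConfigs.get(ConsumerConfig.CLIENT_ID_CONFIG));        // "client-restore-consumer"
    }
}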
|
@Test public void testCacheSchemaToJsonConversion() { Cache<Schema, ObjectNode> cache = Whitebox.getInternalState(converter, "fromConnectSchemaCache"); assertEquals(0, cache.size()); converter.fromConnectData(TOPIC, SchemaBuilder.bool().build(), true); assertEquals(1, cache.size()); converter.fromConnectData(TOPIC, SchemaBuilder.bool().build(), true); assertEquals(1, cache.size()); converter.fromConnectData(TOPIC, SchemaBuilder.bool().optional().build(), true); assertEquals(2, cache.size()); }
|
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { JsonNode jsonValue = enableSchemas ? convertToJsonWithEnvelope(schema, value) : convertToJsonWithoutEnvelope(schema, value); try { return serializer.serialize(topic, jsonValue); } catch (SerializationException e) { throw new DataException("Converting Kafka Connect data to byte[] failed due to serialization error: ", e); } } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
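The cache test reaches into the converter's fromConnectSchemaCache via Whitebox to show that structurally equal schemas are translated to JSON schemas only once, while a differently shaped schema (optional vs. required) takes a second slot. The sketch below shows the same reuse from the public API only; the "schemas.cache.size" key used to bound the cache is an assumption about the converter's configuration surface, and the value is arbitrary.

import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.json.JsonConverter;

// Structurally equal schemas share one cached JSON schema; a different shape adds a new entry.
public class SchemaCacheSketch {
    public static void main(String[] args) {
        Map<String, Object> configs = new HashMap<>();
        configs.put("schemas.enable", "true");
        configs.put("schemas.cache.size", "16"); // assumed key bounding the schema-conversion cache

        JsonConverter converter = new JsonConverter();
        converter.configure(configs, false);

        converter.fromConnectData("example-topic", SchemaBuilder.bool().build(), true);            // builds + caches
        converter.fromConnectData("example-topic", SchemaBuilder.bool().build(), true);            // cache hit
        converter.fromConnectData("example-topic", SchemaBuilder.bool().optional().build(), true); // second entry
    }
}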
|
@Test public void shouldSupportNonPrefixedConsumerConfigs() throws Exception { props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); props.put(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG, 1); final StreamsConfig streamsConfig = new StreamsConfig(props); final Map<String, Object> consumerConfigs = streamsConfig.getConsumerConfigs(null, "groupId", "clientId"); assertEquals("earliest", consumerConfigs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)); assertEquals(1, consumerConfigs.get(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG)); }
|
public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
@Test public void shouldBeSupportNonPrefixedRestoreConsumerConfigs() throws Exception { props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); props.put(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG, 1); final StreamsConfig streamsConfig = new StreamsConfig(props); final Map<String, Object> consumerConfigs = streamsConfig.getRestoreConsumerConfigs("groupId"); assertEquals("earliest", consumerConfigs.get(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG)); assertEquals(1, consumerConfigs.get(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG)); }
|
public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getRestoreConsumerConfigs(final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.remove(ConsumerConfig.GROUP_ID_CONFIG); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-restore-consumer"); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
@Test public void shouldSupportNonPrefixedProducerConfigs() throws Exception { props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 10); props.put(ConsumerConfig.METRICS_NUM_SAMPLES_CONFIG, 1); final StreamsConfig streamsConfig = new StreamsConfig(props); final Map<String, Object> configs = streamsConfig.getProducerConfigs("clientId"); assertEquals(10, configs.get(ProducerConfig.BUFFER_MEMORY_CONFIG)); assertEquals(1, configs.get(ProducerConfig.METRICS_NUM_SAMPLES_CONFIG)); }
|
public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfKeySerdeConfigFails() throws Exception { props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, MisconfiguredSerde.class); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.defaultKeySerde(); }
|
public Serde defaultKeySerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_KEY_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), true); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure key serde %s", get(DEFAULT_KEY_SERDE_CLASS_CONFIG)), e); } }
|
StreamsConfig extends AbstractConfig { public Serde defaultKeySerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_KEY_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), true); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure key serde %s", get(DEFAULT_KEY_SERDE_CLASS_CONFIG)), e); } } }
|
StreamsConfig extends AbstractConfig { public Serde defaultKeySerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_KEY_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), true); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure key serde %s", get(DEFAULT_KEY_SERDE_CLASS_CONFIG)), e); } } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Serde defaultKeySerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_KEY_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), true); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure key serde %s", get(DEFAULT_KEY_SERDE_CLASS_CONFIG)), e); } } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Serde defaultKeySerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_KEY_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), true); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure key serde %s", get(DEFAULT_KEY_SERDE_CLASS_CONFIG)), e); } } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfValueSerdeConfigFails() throws Exception { props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, MisconfiguredSerde.class); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.defaultValueSerde(); }
|
public Serde defaultValueSerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), false); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure value serde %s", get(DEFAULT_VALUE_SERDE_CLASS_CONFIG)), e); } }
|
StreamsConfig extends AbstractConfig { public Serde defaultValueSerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), false); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure value serde %s", get(DEFAULT_VALUE_SERDE_CLASS_CONFIG)), e); } } }
|
StreamsConfig extends AbstractConfig { public Serde defaultValueSerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), false); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure value serde %s", get(DEFAULT_VALUE_SERDE_CLASS_CONFIG)), e); } } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Serde defaultValueSerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), false); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure value serde %s", get(DEFAULT_VALUE_SERDE_CLASS_CONFIG)), e); } } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Serde defaultValueSerde() { try { Serde<?> serde = getConfiguredInstance(DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serde.class); serde.configure(originals(), false); return serde; } catch (final Exception e) { throw new StreamsException(String.format("Failed to configure value serde %s", get(DEFAULT_VALUE_SERDE_CLASS_CONFIG)), e); } } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
@Test public void shouldSetInternalLeaveGroupOnCloseConfigToFalseInConsumer() throws Exception { final StreamsConfig streamsConfig = new StreamsConfig(props); final Map<String, Object> consumerConfigs = streamsConfig.getConsumerConfigs(null, "groupId", "clientId"); assertThat(consumerConfigs.get("internal.leave.group.on.close"), CoreMatchers.<Object>equalTo(false)); }
|
public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
@Test(expected = ConfigException.class) public void shouldThrowExceptionIfConsumerIsolationLevelIsOverriddenIfEosEnabled() { props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, EXACTLY_ONCE); props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "anyValue"); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.getConsumerConfigs(null, "groupId", "clientId"); }
|
public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
@Test public void shouldAllowSettingConsumerIsolationLevelIfEosDisabled() { props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, READ_UNCOMMITTED.name().toLowerCase(Locale.ROOT)); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.getConsumerConfigs(null, "groupId", "clientId"); }
|
public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getConsumerConfigs(final StreamThread streamThread, final String groupId, final String clientId) throws ConfigException { final Map<String, Object> consumerProps = getCommonConsumerConfigs(); consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumerProps.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-consumer"); consumerProps.put(InternalConfig.STREAM_THREAD_INSTANCE, streamThread); consumerProps.put(REPLICATION_FACTOR_CONFIG, getInt(REPLICATION_FACTOR_CONFIG)); consumerProps.put(NUM_STANDBY_REPLICAS_CONFIG, getInt(NUM_STANDBY_REPLICAS_CONFIG)); consumerProps.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, StreamPartitionAssignor.class.getName()); consumerProps.put(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG, getLong(WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG)); consumerProps.put(APPLICATION_SERVER_CONFIG, getString(APPLICATION_SERVER_CONFIG)); return consumerProps; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
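Illustrative only, not a dataset row: a minimal sketch of how the getConsumerConfigs variants above are typically exercised, assuming static imports from org.junit.Assert and a fresh config map; the literal values are hypothetical placeholders.

    final Map<String, Object> props = new HashMap<>();
    props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");          // hypothetical application id
    props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");    // hypothetical broker list
    final StreamsConfig streamsConfig = new StreamsConfig(props);
    // group id is conventionally the application id; the client id gets a "-consumer" suffix
    final Map<String, Object> consumerConfigs = streamsConfig.getConsumerConfigs(null, "example-app", "clientId");
    assertEquals("example-app", consumerConfigs.get(ConsumerConfig.GROUP_ID_CONFIG));
    assertEquals("clientId-consumer", consumerConfigs.get(CommonClientConfigs.CLIENT_ID_CONFIG));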
@Test(expected = ConfigException.class) public void shouldThrowExceptionIfProducerEnableIdempotenceIsOverriddenIfEosEnabled() { props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, EXACTLY_ONCE); props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "anyValue"); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.getProducerConfigs("clientId"); }
|
public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
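Illustrative only: a hedged sketch of the failure path encoded in the row above, written as a try/fail/catch variant of the expected-exception test and assuming the same props fixture; the override value is a placeholder.

    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
    props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false");   // any user override is rejected under EOS
    try {
        new StreamsConfig(props).getProducerConfigs("clientId");
        fail("expected ConfigException: exactly_once forces enable.idempotence and refuses user overrides");
    } catch (final ConfigException expected) {
        // expected path; the message names enable.idempotence and the processing.guarantee setting
    }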
@Test public void shouldAllowSettingProducerEnableIdempotenceIfEosDisabled() { props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.getProducerConfigs("clientId"); }
|
public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
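Illustrative only: the same override can also be scoped explicitly to the producer via producerPrefix, listed in the signature cell above; a hedged sketch assuming the same props fixture and an arbitrary value.

    props.put(StreamsConfig.producerPrefix(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), true);
    final Map<String, Object> producerConfigs = new StreamsConfig(props).getProducerConfigs("clientId");
    // with the default at-least-once guarantee the producer-scoped override is merged through
    assertTrue(producerConfigs.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG));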
@Test public void testJsonSchemaCacheSizeFromConfigFile() throws URISyntaxException, IOException { URL url = getClass().getResource("/connect-test.properties"); File propFile = new File(url.toURI()); String workerPropsFile = propFile.getAbsolutePath(); Map<String, String> workerProps = !workerPropsFile.isEmpty() ? Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.<String, String>emptyMap(); JsonConverter rc = new JsonConverter(); rc.configure(workerProps, false); }
|
@Override public void configure(Map<String, ?> configs, boolean isKey) { Object enableConfigsVal = configs.get(SCHEMAS_ENABLE_CONFIG); if (enableConfigsVal != null) enableSchemas = enableConfigsVal.toString().equals("true"); serializer.configure(configs, isKey); deserializer.configure(configs, isKey); Object cacheSizeVal = configs.get(SCHEMAS_CACHE_SIZE_CONFIG); if (cacheSizeVal != null) cacheSize = Integer.parseInt((String) cacheSizeVal); fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<Schema, ObjectNode>(cacheSize)); toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<JsonNode, Schema>(cacheSize)); }
|
JsonConverter implements Converter { @Override public void configure(Map<String, ?> configs, boolean isKey) { Object enableConfigsVal = configs.get(SCHEMAS_ENABLE_CONFIG); if (enableConfigsVal != null) enableSchemas = enableConfigsVal.toString().equals("true"); serializer.configure(configs, isKey); deserializer.configure(configs, isKey); Object cacheSizeVal = configs.get(SCHEMAS_CACHE_SIZE_CONFIG); if (cacheSizeVal != null) cacheSize = Integer.parseInt((String) cacheSizeVal); fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<Schema, ObjectNode>(cacheSize)); toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<JsonNode, Schema>(cacheSize)); } }
|
JsonConverter implements Converter { @Override public void configure(Map<String, ?> configs, boolean isKey) { Object enableConfigsVal = configs.get(SCHEMAS_ENABLE_CONFIG); if (enableConfigsVal != null) enableSchemas = enableConfigsVal.toString().equals("true"); serializer.configure(configs, isKey); deserializer.configure(configs, isKey); Object cacheSizeVal = configs.get(SCHEMAS_CACHE_SIZE_CONFIG); if (cacheSizeVal != null) cacheSize = Integer.parseInt((String) cacheSizeVal); fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<Schema, ObjectNode>(cacheSize)); toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<JsonNode, Schema>(cacheSize)); } }
|
JsonConverter implements Converter { @Override public void configure(Map<String, ?> configs, boolean isKey) { Object enableConfigsVal = configs.get(SCHEMAS_ENABLE_CONFIG); if (enableConfigsVal != null) enableSchemas = enableConfigsVal.toString().equals("true"); serializer.configure(configs, isKey); deserializer.configure(configs, isKey); Object cacheSizeVal = configs.get(SCHEMAS_CACHE_SIZE_CONFIG); if (cacheSizeVal != null) cacheSize = Integer.parseInt((String) cacheSizeVal); fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<Schema, ObjectNode>(cacheSize)); toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<JsonNode, Schema>(cacheSize)); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public void configure(Map<String, ?> configs, boolean isKey) { Object enableConfigsVal = configs.get(SCHEMAS_ENABLE_CONFIG); if (enableConfigsVal != null) enableSchemas = enableConfigsVal.toString().equals("true"); serializer.configure(configs, isKey); deserializer.configure(configs, isKey); Object cacheSizeVal = configs.get(SCHEMAS_CACHE_SIZE_CONFIG); if (cacheSizeVal != null) cacheSize = Integer.parseInt((String) cacheSizeVal); fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<Schema, ObjectNode>(cacheSize)); toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<JsonNode, Schema>(cacheSize)); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
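Illustrative only: the cache-size setting exercised by the row above can also be supplied programmatically instead of via connect-test.properties; a minimal sketch using the raw names behind SCHEMAS_ENABLE_CONFIG and SCHEMAS_CACHE_SIZE_CONFIG, with an arbitrary size. Since the value is parsed with Integer.parseInt, it must be passed as a String.

    final JsonConverter converter = new JsonConverter();
    final Map<String, String> config = new HashMap<>();
    config.put("schemas.enable", "true");       // SCHEMAS_ENABLE_CONFIG
    config.put("schemas.cache.size", "16");     // SCHEMAS_CACHE_SIZE_CONFIG; parsed via Integer.parseInt
    converter.configure(config, false);         // false -> configure as a value converter, not a key converter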
@Test(expected = ConfigException.class) public void shouldThrowExceptionIfProducerMaxInFlightRequestPerConnectionsIsOverriddenIfEosEnabled() { props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, EXACTLY_ONCE); props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "anyValue"); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.getProducerConfigs("clientId"); }
|
public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
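Illustrative only: a hedged sketch of the second EOS guard shown above; as with the idempotence case, the override is rejected before any producer configuration is handed out. It assumes the same props fixture, and the override value is a placeholder.

    props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);
    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "2");   // placeholder user override
    try {
        new StreamsConfig(props).getProducerConfigs("clientId");
        fail("expected ConfigException: exactly_once pins max.in.flight.requests.per.connection to 1");
    } catch (final ConfigException expected) {
        // expected path
    }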
@Test public void shouldAllowSettingProducerMaxInFlightRequestPerConnectionsWhenEosDisabled() { props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "anyValue"); final StreamsConfig streamsConfig = new StreamsConfig(props); streamsConfig.getProducerConfigs("clientId"); }
|
public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
|
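Illustrative only: under the default at-least-once guarantee the same override is merged into the producer configs unchanged; a hedged sketch assuming the same props fixture, with an arbitrary value.

    props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, 5);
    final Map<String, Object> producerConfigs = new StreamsConfig(props).getProducerConfigs("clientId");
    assertThat((Integer) producerConfigs.get(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION), equalTo(5));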
@Test public void shouldNotOverrideUserConfigRetriesIfExactlyOnceEnabled() { final int numberOfRetries = 42; props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, EXACTLY_ONCE); props.put(ProducerConfig.RETRIES_CONFIG, numberOfRetries); final StreamsConfig streamsConfig = new StreamsConfig(props); final Map<String, Object> producerConfigs = streamsConfig.getProducerConfigs("clientId"); assertThat((Integer) producerConfigs.get(ProducerConfig.RETRIES_CONFIG), equalTo(numberOfRetries)); }
|
public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); }
|
StreamsConfig extends AbstractConfig { public Map<String, Object> getProducerConfigs(final String clientId) { final Map<String, Object> clientProvidedProps = getClientPropsWithPrefix(PRODUCER_PREFIX, ProducerConfig.configNames()); if (eosEnabled) { if (clientProvidedProps.containsKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have idempotency enabled."); } if (clientProvidedProps.containsKey(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION)) { throw new ConfigException("Unexpected user-specified consumer config " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + "; because " + PROCESSING_GUARANTEE_CONFIG + " is set to '" + EXACTLY_ONCE + "' producer will always have only one in-flight request per connection."); } } final Map<String, Object> props = new HashMap<>(eosEnabled ? PRODUCER_EOS_OVERRIDES : PRODUCER_DEFAULT_OVERRIDES); props.putAll(clientProvidedProps); props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, originals().get(BOOTSTRAP_SERVERS_CONFIG)); props.put(CommonClientConfigs.CLIENT_ID_CONFIG, clientId + "-producer"); return props; } StreamsConfig(final Map<?, ?> props); static String consumerPrefix(final String consumerProp); static String producerPrefix(final String producerProp); static ConfigDef configDef(); Map<String, Object> getConsumerConfigs(final StreamThread streamThread,
final String groupId,
final String clientId); Map<String, Object> getRestoreConsumerConfigs(final String clientId); Map<String, Object> getProducerConfigs(final String clientId); @Deprecated Serde keySerde(); Serde defaultKeySerde(); @Deprecated Serde valueSerde(); Serde defaultValueSerde(); TimestampExtractor defaultTimestampExtractor(); static void main(final String[] args); static final String CONSUMER_PREFIX; static final String PRODUCER_PREFIX; static final String AT_LEAST_ONCE; static final String EXACTLY_ONCE; static final String APPLICATION_ID_CONFIG; static final String APPLICATION_SERVER_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String BUFFERED_RECORDS_PER_PARTITION_CONFIG; static final String CACHE_MAX_BYTES_BUFFERING_CONFIG; static final String CLIENT_ID_CONFIG; static final String COMMIT_INTERVAL_MS_CONFIG; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String DEFAULT_KEY_SERDE_CLASS_CONFIG; static final String DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG; static final String DEFAULT_VALUE_SERDE_CLASS_CONFIG; @Deprecated
static final String KEY_SERDE_CLASS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String NUM_STANDBY_REPLICAS_CONFIG; static final String NUM_STREAM_THREADS_CONFIG; static final String PARTITION_GROUPER_CLASS_CONFIG; static final String POLL_MS_CONFIG; static final String PROCESSING_GUARANTEE_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String REPLICATION_FACTOR_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String ROCKSDB_CONFIG_SETTER_CLASS_CONFIG; static final String SECURITY_PROTOCOL_CONFIG; static final String SEND_BUFFER_CONFIG; static final String STATE_CLEANUP_DELAY_MS_CONFIG; static final String STATE_DIR_CONFIG; @Deprecated
static final String TIMESTAMP_EXTRACTOR_CLASS_CONFIG; @Deprecated
static final String VALUE_SERDE_CLASS_CONFIG; static final String WINDOW_STORE_CHANGE_LOG_ADDITIONAL_RETENTION_MS_CONFIG; @Deprecated
static final String ZOOKEEPER_CONNECT_CONFIG; }
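A minimal sketch (not part of the dataset) of how getProducerConfigs behaves once the exactly-once guarantee is enabled; the application id, broker address, and client id are hypothetical, and the printed values depend on the PRODUCER_EOS_OVERRIDES defaults of the version shown above:

import java.util.HashMap;
import java.util.Map;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.streams.StreamsConfig;

public class EosProducerConfigSketch {
    public static void main(final String[] args) {
        final Map<String, Object> props = new HashMap<>();
        props.put(StreamsConfig.APPLICATION_ID_CONFIG, "example-app");        // hypothetical application id
        props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");  // hypothetical broker address
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE);

        final StreamsConfig config = new StreamsConfig(props);
        final Map<String, Object> producerProps = config.getProducerConfigs("example-app-1-producer-client");

        // With EOS enabled, the idempotence and in-flight overrides are applied by Streams itself;
        // supplying enable.idempotence or max.in.flight.requests.per.connection under the
        // "producer." prefix would instead trigger the ConfigException shown in the method above.
        System.out.println(producerProps.get(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG));
        System.out.println(producerProps.get(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION));
    }
}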
|
@Test public void shouldCreateInMemoryStoreSupplierWithLoggedConfig() throws Exception { final StateStoreSupplier supplier = Stores.create("store") .withKeys(Serdes.String()) .withValues(Serdes.String()) .inMemory() .enableLogging(Collections.singletonMap("retention.ms", "1000")) .build(); final Map<String, String> config = supplier.logConfig(); assertTrue(supplier.loggingEnabled()); assertEquals("1000", config.get("retention.ms")); }
|
public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; }
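A minimal usage sketch (not from the dataset) of the fluent builder above, wiring the persistent windowed branch; the store name, window and retention sizes, and changelog config are hypothetical:

import java.util.Collections;
import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.processor.StateStoreSupplier;
import org.apache.kafka.streams.state.Stores;

public class WindowedStoreSketch {
    public static void main(final String[] args) {
        // Hypothetical sizing: 1-minute windows, 1-hour retention, 3 segments, duplicates not retained.
        final StateStoreSupplier supplier = Stores.create("clicks-per-minute")
            .withKeys(Serdes.String())
            .withValues(Serdes.Long())
            .persistent()
            .windowed(60 * 1000L, 60 * 60 * 1000L, 3, false)
            .enableLogging(Collections.singletonMap("retention.ms", "3600000"))
            .build();

        // numSegments > 0, so build() takes the RocksDBWindowStoreSupplier branch with logging on.
        System.out.println(supplier.name());
        System.out.println(supplier.loggingEnabled());
    }
}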
|
Stores { public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; } }
|
Stores { public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; } }
|
Stores { public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; } static StoreFactory create(final String name); }
|
Stores { public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; } static StoreFactory create(final String name); }
|
@Test public void shouldCreateInMemoryStoreSupplierNotLogged() throws Exception { final StateStoreSupplier supplier = Stores.create("store") .withKeys(Serdes.String()) .withValues(Serdes.String()) .inMemory() .disableLogging() .build(); assertFalse(supplier.loggingEnabled()); }
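A complementary sketch (hypothetical store name and capacity) showing the in-memory branch with a bounded entry count, which routes build() to the LRU cache supplier rather than the plain in-memory key-value supplier:

import org.apache.kafka.common.serialization.Serdes;
import org.apache.kafka.streams.processor.StateStoreSupplier;
import org.apache.kafka.streams.state.Stores;

public class LruStoreSketch {
    public static void main(final String[] args) {
        final StateStoreSupplier supplier = Stores.create("recent-lookups")  // hypothetical store name
            .withKeys(Serdes.String())
            .withValues(Serdes.String())
            .inMemory()
            .maxEntries(1000)      // capacity < Integer.MAX_VALUE selects the LRU supplier
            .disableLogging()
            .build();

        System.out.println(supplier.loggingEnabled());  // false, as asserted in the test above
    }
}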
|
public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; }
|
Stores { public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; } }
|
Stores { public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; } }
|
Stores { public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; } static StoreFactory create(final String name); }
|
Stores { public static StoreFactory create(final String name) { return new StoreFactory() { @Override public <K> ValueFactory<K> withKeys(final Serde<K> keySerde) { return new ValueFactory<K>() { @Override public <V> KeyValueFactory<K, V> withValues(final Serde<V> valueSerde) { return new KeyValueFactory<K, V>() { @Override public InMemoryKeyValueFactory<K, V> inMemory() { return new InMemoryKeyValueFactory<K, V>() { private int capacity = Integer.MAX_VALUE; private final Map<String, String> logConfig = new HashMap<>(); private boolean logged = true; @Override public InMemoryKeyValueFactory<K, V> maxEntries(int capacity) { if (capacity < 1) throw new IllegalArgumentException("The capacity must be positive"); this.capacity = capacity; return this; } @Override public InMemoryKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public InMemoryKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public StateStoreSupplier build() { log.trace("Creating InMemory Store name={} capacity={} logged={}", name, capacity, logged); if (capacity < Integer.MAX_VALUE) { return new InMemoryLRUCacheStoreSupplier<>(name, capacity, keySerde, valueSerde, logged, logConfig); } return new InMemoryKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig); } }; } @Override public PersistentKeyValueFactory<K, V> persistent() { return new PersistentKeyValueFactory<K, V>() { public boolean cachingEnabled; private long windowSize; private final Map<String, String> logConfig = new HashMap<>(); private int numSegments = 0; private long retentionPeriod = 0L; private boolean retainDuplicates = false; private boolean sessionWindows; private boolean logged = true; @Override public PersistentKeyValueFactory<K, V> windowed(final long windowSize, final long retentionPeriod, final int numSegments, final boolean retainDuplicates) { this.windowSize = windowSize; this.numSegments = numSegments; this.retentionPeriod = retentionPeriod; this.retainDuplicates = retainDuplicates; this.sessionWindows = false; return this; } @Override public PersistentKeyValueFactory<K, V> sessionWindowed(final long retentionPeriod) { this.sessionWindows = true; this.retentionPeriod = retentionPeriod; return this; } @Override public PersistentKeyValueFactory<K, V> enableLogging(final Map<String, String> config) { logged = true; logConfig.putAll(config); return this; } @Override public PersistentKeyValueFactory<K, V> disableLogging() { logged = false; logConfig.clear(); return this; } @Override public PersistentKeyValueFactory<K, V> enableCaching() { cachingEnabled = true; return this; } @Override public StateStoreSupplier build() { log.trace("Creating RocksDb Store name={} numSegments={} logged={}", name, numSegments, logged); if (sessionWindows) { return new RocksDBSessionStoreSupplier<>(name, retentionPeriod, keySerde, valueSerde, logged, logConfig, cachingEnabled); } else if (numSegments > 0) { return new RocksDBWindowStoreSupplier<>(name, retentionPeriod, numSegments, retainDuplicates, keySerde, valueSerde, windowSize, logged, logConfig, cachingEnabled); } return new RocksDBKeyValueStoreSupplier<>(name, keySerde, valueSerde, logged, logConfig, cachingEnabled); } }; } }; } }; } }; } static StoreFactory create(final String name); }
|