Dataset columns and string-length ranges:

| Column          | Type   | Min length | Max length |
|-----------------|--------|------------|------------|
| target          | string | 20         | 113k       |
| src_fm          | string | 11         | 86.3k      |
| src_fm_fc       | string | 21         | 86.4k      |
| src_fm_fc_co    | string | 30         | 86.4k      |
| src_fm_fc_ms    | string | 42         | 86.8k      |
| src_fm_fc_ms_ff | string | 43         | 86.8k      |
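Each row below pairs a JUnit test method (target) with progressively richer views of the code under test: judging from the rows, src_fm holds the focal method alone, src_fm_fc wraps it in its focal class, src_fm_fc_co additionally lists constructor signatures, src_fm_fc_ms the signatures of the class's other methods, and src_fm_fc_ms_ff its field declarations. As a minimal sketch, one row could be modeled as a plain Java record; the record and accessor names are illustrative, and only the six column names come from the table above.

```java
/** Illustrative container for one row of the preview below; all six columns are plain strings. */
public record TestGenerationRow(
        String target,       // JUnit test method paired with the focal method
        String srcFm,        // focal method only
        String srcFmFc,      // focal method inside its focal class
        String srcFmFcCo,    // plus constructor signatures
        String srcFmFcMs,    // plus signatures of the class's other methods
        String srcFmFcMsFf   // plus field declarations
) {
    /** Rough size check mirroring the string-length statistics above. */
    public int totalLength() {
        return target.length() + srcFm.length() + srcFmFc.length()
                + srcFmFcCo.length() + srcFmFcMs.length() + srcFmFcMsFf.length();
    }
}
```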
@Test public void dateToConnect() { Schema schema = Date.SCHEMA; GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.DATE, 10000); java.util.Date reference = calendar.getTime(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1 }, \"payload\": 10000 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); java.util.Date converted = (java.util.Date) schemaAndValue.value(); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, converted); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
@Test public void testNonStringToBytes() throws UnsupportedEncodingException { assertArrayEquals("true".getBytes("UTF8"), converter.fromConnectData(TOPIC, Schema.BOOLEAN_SCHEMA, true)); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
@Test public void testNullToBytes() { assertEquals(null, converter.fromConnectData(TOPIC, Schema.OPTIONAL_STRING_SCHEMA, null)); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
@Test public void testToBytesIgnoresSchema() throws UnsupportedEncodingException { assertArrayEquals("true".getBytes("UTF8"), converter.fromConnectData(TOPIC, null, true)); }
@Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
StringConverter implements Converter { @Override public byte[] fromConnectData(String topic, Schema schema, Object value) { try { return serializer.serialize(topic, value == null ? null : value.toString()); } catch (SerializationException e) { throw new DataException("Failed to serialize to a string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
@Test public void testBytesToString() { SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes()); assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema()); assertEquals(SAMPLE_STRING, data.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } }
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } }
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } StringConverter(); }
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
@Test public void testBytesNullToString() { SchemaAndValue data = converter.toConnectData(TOPIC, null); assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema()); assertEquals(null, data.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } }
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } }
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } StringConverter(); }
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
StringConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { try { return new SchemaAndValue(Schema.OPTIONAL_STRING_SCHEMA, deserializer.deserialize(topic, value)); } catch (SerializationException e) { throw new DataException("Failed to deserialize string: ", e); } } StringConverter(); @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); }
@Test(expected = ConnectException.class) public void testMultipleSourcesInvalid() { sourceProperties.put(FileStreamSourceConnector.TOPIC_CONFIG, MULTIPLE_TOPICS); connector.start(sourceProperties); }
@Override public void start(Map<String, String> props) { filename = props.get(FILE_CONFIG); topic = props.get(TOPIC_CONFIG); if (topic == null || topic.isEmpty()) throw new ConnectException("FileStreamSourceConnector configuration must include 'topic' setting"); if (topic.contains(",")) throw new ConnectException("FileStreamSourceConnector should only have a single topic when used as a source."); }
FileStreamSourceConnector extends SourceConnector { @Override public void start(Map<String, String> props) { filename = props.get(FILE_CONFIG); topic = props.get(TOPIC_CONFIG); if (topic == null || topic.isEmpty()) throw new ConnectException("FileStreamSourceConnector configuration must include 'topic' setting"); if (topic.contains(",")) throw new ConnectException("FileStreamSourceConnector should only have a single topic when used as a source."); } }
FileStreamSourceConnector extends SourceConnector { @Override public void start(Map<String, String> props) { filename = props.get(FILE_CONFIG); topic = props.get(TOPIC_CONFIG); if (topic == null || topic.isEmpty()) throw new ConnectException("FileStreamSourceConnector configuration must include 'topic' setting"); if (topic.contains(",")) throw new ConnectException("FileStreamSourceConnector should only have a single topic when used as a source."); } }
FileStreamSourceConnector extends SourceConnector { @Override public void start(Map<String, String> props) { filename = props.get(FILE_CONFIG); topic = props.get(TOPIC_CONFIG); if (topic == null || topic.isEmpty()) throw new ConnectException("FileStreamSourceConnector configuration must include 'topic' setting"); if (topic.contains(",")) throw new ConnectException("FileStreamSourceConnector should only have a single topic when used as a source."); } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); }
FileStreamSourceConnector extends SourceConnector { @Override public void start(Map<String, String> props) { filename = props.get(FILE_CONFIG); topic = props.get(TOPIC_CONFIG); if (topic == null || topic.isEmpty()) throw new ConnectException("FileStreamSourceConnector configuration must include 'topic' setting"); if (topic.contains(",")) throw new ConnectException("FileStreamSourceConnector should only have a single topic when used as a source."); } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); static final String TOPIC_CONFIG; static final String FILE_CONFIG; }
@Test public void testTaskClass() { PowerMock.replayAll(); connector.start(sourceProperties); assertEquals(FileStreamSourceTask.class, connector.taskClass()); PowerMock.verifyAll(); }
@Override public Class<? extends Task> taskClass() { return FileStreamSourceTask.class; }
FileStreamSourceConnector extends SourceConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSourceTask.class; } }
FileStreamSourceConnector extends SourceConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSourceTask.class; } }
FileStreamSourceConnector extends SourceConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSourceTask.class; } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); }
FileStreamSourceConnector extends SourceConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSourceTask.class; } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); static final String TOPIC_CONFIG; static final String FILE_CONFIG; }
@Test public void testTaskClass() { PowerMock.replayAll(); connector.start(sinkProperties); assertEquals(FileStreamSinkTask.class, connector.taskClass()); PowerMock.verifyAll(); }
@Override public Class<? extends Task> taskClass() { return FileStreamSinkTask.class; }
FileStreamSinkConnector extends SinkConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSinkTask.class; } }
FileStreamSinkConnector extends SinkConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSinkTask.class; } }
FileStreamSinkConnector extends SinkConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSinkTask.class; } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); }
FileStreamSinkConnector extends SinkConnector { @Override public Class<? extends Task> taskClass() { return FileStreamSinkTask.class; } @Override String version(); @Override void start(Map<String, String> props); @Override Class<? extends Task> taskClass(); @Override List<Map<String, String>> taskConfigs(int maxTasks); @Override void stop(); @Override ConfigDef config(); static final String FILE_CONFIG; }
@Test(expected = ConnectException.class) public void testMissingTopic() throws InterruptedException { replay(); config.remove(FileStreamSourceConnector.TOPIC_CONFIG); task.start(config); }
@Override public void start(Map<String, String> props) { filename = props.get(FileStreamSourceConnector.FILE_CONFIG); if (filename == null || filename.isEmpty()) { stream = System.in; streamOffset = null; reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)); } topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG); if (topic == null) throw new ConnectException("FileStreamSourceTask config missing topic setting"); }
FileStreamSourceTask extends SourceTask { @Override public void start(Map<String, String> props) { filename = props.get(FileStreamSourceConnector.FILE_CONFIG); if (filename == null || filename.isEmpty()) { stream = System.in; streamOffset = null; reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)); } topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG); if (topic == null) throw new ConnectException("FileStreamSourceTask config missing topic setting"); } }
FileStreamSourceTask extends SourceTask { @Override public void start(Map<String, String> props) { filename = props.get(FileStreamSourceConnector.FILE_CONFIG); if (filename == null || filename.isEmpty()) { stream = System.in; streamOffset = null; reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)); } topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG); if (topic == null) throw new ConnectException("FileStreamSourceTask config missing topic setting"); } }
FileStreamSourceTask extends SourceTask { @Override public void start(Map<String, String> props) { filename = props.get(FileStreamSourceConnector.FILE_CONFIG); if (filename == null || filename.isEmpty()) { stream = System.in; streamOffset = null; reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)); } topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG); if (topic == null) throw new ConnectException("FileStreamSourceTask config missing topic setting"); } @Override String version(); @Override void start(Map<String, String> props); @Override List<SourceRecord> poll(); @Override void stop(); }
FileStreamSourceTask extends SourceTask { @Override public void start(Map<String, String> props) { filename = props.get(FileStreamSourceConnector.FILE_CONFIG); if (filename == null || filename.isEmpty()) { stream = System.in; streamOffset = null; reader = new BufferedReader(new InputStreamReader(stream, StandardCharsets.UTF_8)); } topic = props.get(FileStreamSourceConnector.TOPIC_CONFIG); if (topic == null) throw new ConnectException("FileStreamSourceTask config missing topic setting"); } @Override String version(); @Override void start(Map<String, String> props); @Override List<SourceRecord> poll(); @Override void stop(); static final String FILENAME_FIELD; static final String POSITION_FIELD; }
@Test public void testMaxUsableProduceMagic() { ApiVersions apiVersions = new ApiVersions(); assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic()); apiVersions.update("0", NodeApiVersions.create()); assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic()); apiVersions.update("1", NodeApiVersions.create(Collections.singleton( new ApiVersionsResponse.ApiVersion(ApiKeys.PRODUCE.id, (short) 0, (short) 2)))); assertEquals(RecordBatch.MAGIC_VALUE_V1, apiVersions.maxUsableProduceMagic()); apiVersions.remove("1"); assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, apiVersions.maxUsableProduceMagic()); }
public synchronized byte maxUsableProduceMagic() { return maxUsableProduceMagic; }
ApiVersions { public synchronized byte maxUsableProduceMagic() { return maxUsableProduceMagic; } }
ApiVersions { public synchronized byte maxUsableProduceMagic() { return maxUsableProduceMagic; } }
ApiVersions { public synchronized byte maxUsableProduceMagic() { return maxUsableProduceMagic; } synchronized void update(String nodeId, NodeApiVersions nodeApiVersions); synchronized void remove(String nodeId); synchronized NodeApiVersions get(String nodeId); synchronized byte maxUsableProduceMagic(); }
ApiVersions { public synchronized byte maxUsableProduceMagic() { return maxUsableProduceMagic; } synchronized void update(String nodeId, NodeApiVersions nodeApiVersions); synchronized void remove(String nodeId); synchronized NodeApiVersions get(String nodeId); synchronized byte maxUsableProduceMagic(); }
@Test public void dateToConnectOptional() { Schema schema = Date.builder().optional().schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1, \"optional\": true }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertNull(schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
@Test public void testParseAndValidateAddresses() { check("127.0.0.1:8000"); check("mydomain.com:8080"); check("[::1]:8000"); check("[2001:db8:85a3:8d3:1319:8a2e:370:7348]:1234", "mydomain.com:10000"); List<InetSocketAddress> validatedAddresses = check("some.invalid.hostname.foo.bar.local:9999", "mydomain.com:10000"); assertEquals(1, validatedAddresses.size()); InetSocketAddress onlyAddress = validatedAddresses.get(0); assertEquals("mydomain.com", onlyAddress.getHostName()); assertEquals(10000, onlyAddress.getPort()); }
public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls) { List<InetSocketAddress> addresses = new ArrayList<>(); for (String url : urls) { if (url != null && !url.isEmpty()) { try { String host = getHost(url); Integer port = getPort(url); if (host == null || port == null) throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); InetSocketAddress address = new InetSocketAddress(host, port); if (address.isUnresolved()) { log.warn("Removing server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host); } else { addresses.add(address); } } catch (IllegalArgumentException e) { throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); } } } if (addresses.isEmpty()) throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); return addresses; }
ClientUtils { public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls) { List<InetSocketAddress> addresses = new ArrayList<>(); for (String url : urls) { if (url != null && !url.isEmpty()) { try { String host = getHost(url); Integer port = getPort(url); if (host == null || port == null) throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); InetSocketAddress address = new InetSocketAddress(host, port); if (address.isUnresolved()) { log.warn("Removing server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host); } else { addresses.add(address); } } catch (IllegalArgumentException e) { throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); } } } if (addresses.isEmpty()) throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); return addresses; } }
ClientUtils { public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls) { List<InetSocketAddress> addresses = new ArrayList<>(); for (String url : urls) { if (url != null && !url.isEmpty()) { try { String host = getHost(url); Integer port = getPort(url); if (host == null || port == null) throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); InetSocketAddress address = new InetSocketAddress(host, port); if (address.isUnresolved()) { log.warn("Removing server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host); } else { addresses.add(address); } } catch (IllegalArgumentException e) { throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); } } } if (addresses.isEmpty()) throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); return addresses; } }
ClientUtils { public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls) { List<InetSocketAddress> addresses = new ArrayList<>(); for (String url : urls) { if (url != null && !url.isEmpty()) { try { String host = getHost(url); Integer port = getPort(url); if (host == null || port == null) throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); InetSocketAddress address = new InetSocketAddress(host, port); if (address.isUnresolved()) { log.warn("Removing server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host); } else { addresses.add(address); } } catch (IllegalArgumentException e) { throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); } } } if (addresses.isEmpty()) throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); return addresses; } static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls); static void closeQuietly(Closeable c, String name, AtomicReference<Throwable> firstException); static ChannelBuilder createChannelBuilder(AbstractConfig config); }
ClientUtils { public static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls) { List<InetSocketAddress> addresses = new ArrayList<>(); for (String url : urls) { if (url != null && !url.isEmpty()) { try { String host = getHost(url); Integer port = getPort(url); if (host == null || port == null) throw new ConfigException("Invalid url in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); InetSocketAddress address = new InetSocketAddress(host, port); if (address.isUnresolved()) { log.warn("Removing server {} from {} as DNS resolution failed for {}", url, CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, host); } else { addresses.add(address); } } catch (IllegalArgumentException e) { throw new ConfigException("Invalid port in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG + ": " + url); } } } if (addresses.isEmpty()) throw new ConfigException("No resolvable bootstrap urls given in " + CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG); return addresses; } static List<InetSocketAddress> parseAndValidateAddresses(List<String> urls); static void closeQuietly(Closeable c, String name, AtomicReference<Throwable> firstException); static ChannelBuilder createChannelBuilder(AbstractConfig config); }
@Test public void testClose() { client.ready(node, time.milliseconds()); awaitReady(client, node); client.poll(1, time.milliseconds()); assertTrue("The client should be ready", client.isReady(node, time.milliseconds())); ProduceRequest.Builder builder = new ProduceRequest.Builder(RecordBatch.CURRENT_MAGIC_VALUE, (short) 1, 1000, Collections.<TopicPartition, MemoryRecords>emptyMap()); ClientRequest request = client.newClientRequest(node.idString(), builder, time.milliseconds(), true); client.send(request, time.milliseconds()); assertEquals("There should be 1 in-flight request after send", 1, client.inFlightRequestCount(node.idString())); assertTrue(client.hasInFlightRequests(node.idString())); assertTrue(client.hasInFlightRequests()); client.close(node.idString()); assertEquals("There should be no in-flight request after close", 0, client.inFlightRequestCount(node.idString())); assertFalse(client.hasInFlightRequests(node.idString())); assertFalse(client.hasInFlightRequests()); assertFalse("Connection should not be ready after close", client.isReady(node, 0)); }
@Override public void close(String nodeId) { selector.close(nodeId); for (InFlightRequest request : inFlightRequests.clearAll(nodeId)) if (request.isInternalRequest && request.header.apiKey() == ApiKeys.METADATA.id) metadataUpdater.handleDisconnection(request.destination); connectionStates.remove(nodeId); }
NetworkClient implements KafkaClient { @Override public void close(String nodeId) { selector.close(nodeId); for (InFlightRequest request : inFlightRequests.clearAll(nodeId)) if (request.isInternalRequest && request.header.apiKey() == ApiKeys.METADATA.id) metadataUpdater.handleDisconnection(request.destination); connectionStates.remove(nodeId); } }
NetworkClient implements KafkaClient { @Override public void close(String nodeId) { selector.close(nodeId); for (InFlightRequest request : inFlightRequests.clearAll(nodeId)) if (request.isInternalRequest && request.header.apiKey() == ApiKeys.METADATA.id) metadataUpdater.handleDisconnection(request.destination); connectionStates.remove(nodeId); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); }
NetworkClient implements KafkaClient { @Override public void close(String nodeId) { selector.close(nodeId); for (InFlightRequest request : inFlightRequests.clearAll(nodeId)) if (request.isInternalRequest && request.header.apiKey() == ApiKeys.METADATA.id) metadataUpdater.handleDisconnection(request.destination); connectionStates.remove(nodeId); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // Check whether a request can be sent to the given Node boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // Call Selector.poll to perform network I/O // After it completes, trigger the callback logic List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
NetworkClient implements KafkaClient { @Override public void close(String nodeId) { selector.close(nodeId); for (InFlightRequest request : inFlightRequests.clearAll(nodeId)) if (request.isInternalRequest && request.header.apiKey() == ApiKeys.METADATA.id) metadataUpdater.handleDisconnection(request.destination); connectionStates.remove(nodeId); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // Check whether a request can be sent to the given Node boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // Call Selector.poll to perform network I/O // After it completes, trigger the callback logic List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
@Test public void testLeastLoadedNode() { client.ready(node, time.milliseconds()); awaitReady(client, node); client.poll(1, time.milliseconds()); assertTrue("The client should be ready", client.isReady(node, time.milliseconds())); Node leastNode = client.leastLoadedNode(time.milliseconds()); assertEquals("There should be one leastloadednode", leastNode.id(), node.id()); time.sleep(reconnectBackoffMsTest); selector.close(node.idString()); client.poll(1, time.milliseconds()); assertFalse("After we forced the disconnection the client is no longer ready.", client.ready(node, time.milliseconds())); leastNode = client.leastLoadedNode(time.milliseconds()); assertEquals("There should be NO leastloadednode", leastNode, null); }
@Override public Node leastLoadedNode(long now) { List<Node> nodes = this.metadataUpdater.fetchNodes(); int inflight = Integer.MAX_VALUE; Node found = null; int offset = this.randOffset.nextInt(nodes.size()); for (int i = 0; i < nodes.size(); i++) { int idx = (offset + i) % nodes.size(); Node node = nodes.get(idx); int currInflight = this.inFlightRequests.count(node.idString()); if (currInflight == 0 && isReady(node, now)) { log.trace("Found least loaded node {} connected with no in-flight requests", node); return node; } else if (!this.connectionStates.isBlackedOut(node.idString(), now) && currInflight < inflight) { inflight = currInflight; found = node; } else if (log.isTraceEnabled()) { log.trace("Removing node {} from least loaded node selection: is-blacked-out: {}, in-flight-requests: {}", node, this.connectionStates.isBlackedOut(node.idString(), now), currInflight); } } if (found != null) log.trace("Found least loaded node {}", found); else log.trace("Least loaded node selection failed to find an available node"); return found; }
NetworkClient implements KafkaClient { @Override public Node leastLoadedNode(long now) { List<Node> nodes = this.metadataUpdater.fetchNodes(); int inflight = Integer.MAX_VALUE; Node found = null; int offset = this.randOffset.nextInt(nodes.size()); for (int i = 0; i < nodes.size(); i++) { int idx = (offset + i) % nodes.size(); Node node = nodes.get(idx); int currInflight = this.inFlightRequests.count(node.idString()); if (currInflight == 0 && isReady(node, now)) { log.trace("Found least loaded node {} connected with no in-flight requests", node); return node; } else if (!this.connectionStates.isBlackedOut(node.idString(), now) && currInflight < inflight) { inflight = currInflight; found = node; } else if (log.isTraceEnabled()) { log.trace("Removing node {} from least loaded node selection: is-blacked-out: {}, in-flight-requests: {}", node, this.connectionStates.isBlackedOut(node.idString(), now), currInflight); } } if (found != null) log.trace("Found least loaded node {}", found); else log.trace("Least loaded node selection failed to find an available node"); return found; } }
NetworkClient implements KafkaClient { @Override public Node leastLoadedNode(long now) { List<Node> nodes = this.metadataUpdater.fetchNodes(); int inflight = Integer.MAX_VALUE; Node found = null; int offset = this.randOffset.nextInt(nodes.size()); for (int i = 0; i < nodes.size(); i++) { int idx = (offset + i) % nodes.size(); Node node = nodes.get(idx); int currInflight = this.inFlightRequests.count(node.idString()); if (currInflight == 0 && isReady(node, now)) { log.trace("Found least loaded node {} connected with no in-flight requests", node); return node; } else if (!this.connectionStates.isBlackedOut(node.idString(), now) && currInflight < inflight) { inflight = currInflight; found = node; } else if (log.isTraceEnabled()) { log.trace("Removing node {} from least loaded node selection: is-blacked-out: {}, in-flight-requests: {}", node, this.connectionStates.isBlackedOut(node.idString(), now), currInflight); } } if (found != null) log.trace("Found least loaded node {}", found); else log.trace("Least loaded node selection failed to find an available node"); return found; } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); }
NetworkClient implements KafkaClient { @Override public Node leastLoadedNode(long now) { List<Node> nodes = this.metadataUpdater.fetchNodes(); int inflight = Integer.MAX_VALUE; Node found = null; int offset = this.randOffset.nextInt(nodes.size()); for (int i = 0; i < nodes.size(); i++) { int idx = (offset + i) % nodes.size(); Node node = nodes.get(idx); int currInflight = this.inFlightRequests.count(node.idString()); if (currInflight == 0 && isReady(node, now)) { log.trace("Found least loaded node {} connected with no in-flight requests", node); return node; } else if (!this.connectionStates.isBlackedOut(node.idString(), now) && currInflight < inflight) { inflight = currInflight; found = node; } else if (log.isTraceEnabled()) { log.trace("Removing node {} from least loaded node selection: is-blacked-out: {}, in-flight-requests: {}", node, this.connectionStates.isBlackedOut(node.idString(), now), currInflight); } } if (found != null) log.trace("Found least loaded node {}", found); else log.trace("Least loaded node selection failed to find an available node"); return found; } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // Check whether a request can be sent to the given Node boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // Call Selector.poll to perform network I/O // After it completes, trigger the callback logic List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
NetworkClient implements KafkaClient { @Override public Node leastLoadedNode(long now) { List<Node> nodes = this.metadataUpdater.fetchNodes(); int inflight = Integer.MAX_VALUE; Node found = null; int offset = this.randOffset.nextInt(nodes.size()); for (int i = 0; i < nodes.size(); i++) { int idx = (offset + i) % nodes.size(); Node node = nodes.get(idx); int currInflight = this.inFlightRequests.count(node.idString()); if (currInflight == 0 && isReady(node, now)) { log.trace("Found least loaded node {} connected with no in-flight requests", node); return node; } else if (!this.connectionStates.isBlackedOut(node.idString(), now) && currInflight < inflight) { inflight = currInflight; found = node; } else if (log.isTraceEnabled()) { log.trace("Removing node {} from least loaded node selection: is-blacked-out: {}, in-flight-requests: {}", node, this.connectionStates.isBlackedOut(node.idString(), now), currInflight); } } if (found != null) log.trace("Found least loaded node {}", found); else log.trace("Least loaded node selection failed to find an available node"); return found; } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // Check whether a request can be sent to a Node boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // Invoke Selector.poll to perform network I/O // After it completes, trigger the callback logic List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
@Test public void testConnectionDelay() { long now = time.milliseconds(); long delay = client.connectionDelay(node, now); assertEquals(0, delay); }
@Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); }
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } }
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); }
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // Check whether a request can be sent to a Node boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // Invoke Selector.poll to perform network I/O // After it completes, trigger the callback logic List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // Check whether a request can be sent to a Node boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // Invoke Selector.poll to perform network I/O // After it completes, trigger the callback logic List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
@Test public void testConnectionDelayConnected() { awaitReady(client, node); long now = time.milliseconds(); long delay = client.connectionDelay(node, now); assertEquals(Long.MAX_VALUE, delay); }
@Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); }
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } }
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); }
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // Check whether a request can be sent to a Node boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // Invoke Selector.poll to perform network I/O // After it completes, trigger the callback logic List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
NetworkClient implements KafkaClient { @Override public long connectionDelay(Node node, long now) { return connectionStates.connectionDelay(node.idString(), now); } NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); NetworkClient(Selectable selector, Metadata metadata, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); NetworkClient(Selectable selector, MetadataUpdater metadataUpdater, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions); private NetworkClient(MetadataUpdater metadataUpdater, Metadata metadata, Selectable selector, String clientId, int maxInFlightRequestsPerConnection, long reconnectBackoffMs, long reconnectBackoffMax, int socketSendBuffer, int socketReceiveBuffer, int requestTimeoutMs, Time time, boolean discoverBrokerVersions, ApiVersions apiVersions, Sensor throttleTimeSensor); @Override // Check whether a request can be sent to a Node boolean ready(Node node, long now); @Override void disconnect(String nodeId); @Override void close(String nodeId); @Override long connectionDelay(Node node, long now); @Override boolean connectionFailed(Node node); @Override boolean isReady(Node node, long now); @Override void send(ClientRequest request, long now); @Override // Invoke Selector.poll to perform network I/O // After it completes, trigger the callback logic List<ClientResponse> poll(long timeout, long now); @Override int inFlightRequestCount(); @Override boolean hasInFlightRequests(); @Override int inFlightRequestCount(String node); @Override boolean hasInFlightRequests(String node); @Override boolean hasReadyNodes(); @Override void wakeup(); @Override void close(); @Override Node leastLoadedNode(long now); static AbstractResponse parseResponse(ByteBuffer responseBuffer, RequestHeader requestHeader); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse); @Override ClientRequest newClientRequest(String nodeId, AbstractRequest.Builder<?> requestBuilder, long createdTimeMs, boolean expectResponse, RequestCompletionHandler callback); boolean discoverBrokerVersions(); }
@Test public void testUnsupportedVersionsToString() { NodeApiVersions versions = new NodeApiVersions(Collections.<ApiVersion>emptyList()); StringBuilder bld = new StringBuilder(); String prefix = "("; for (ApiKeys apiKey : ApiKeys.values()) { bld.append(prefix).append(apiKey.name). append("(").append(apiKey.id).append("): UNSUPPORTED"); prefix = ", "; } bld.append(")"); assertEquals(bld.toString(), versions.toString()); }
@Override public String toString() { return toString(false); }
NodeApiVersions { @Override public String toString() { return toString(false); } }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
@Test public void testUnknownApiVersionsToString() { ApiVersion unknownApiVersion = new ApiVersion((short) 337, (short) 0, (short) 1); NodeApiVersions versions = new NodeApiVersions(Collections.singleton(unknownApiVersion)); assertTrue(versions.toString().endsWith("UNKNOWN(337): 0 to 1)")); }
@Override public String toString() { return toString(false); }
NodeApiVersions { @Override public String toString() { return toString(false); } }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
@Test public void testVersionsToString() { List<ApiVersion> versionList = new ArrayList<>(); for (ApiKeys apiKey : ApiKeys.values()) { if (apiKey == ApiKeys.CONTROLLED_SHUTDOWN_KEY) { versionList.add(new ApiVersion(apiKey.id, (short) 0, (short) 0)); } else if (apiKey == ApiKeys.DELETE_TOPICS) { versionList.add(new ApiVersion(apiKey.id, (short) 10000, (short) 10001)); } else { versionList.add(new ApiVersion(apiKey)); } } NodeApiVersions versions = new NodeApiVersions(versionList); StringBuilder bld = new StringBuilder(); String prefix = "("; for (ApiKeys apiKey : ApiKeys.values()) { bld.append(prefix); if (apiKey == ApiKeys.CONTROLLED_SHUTDOWN_KEY) { bld.append("ControlledShutdown(7): 0 [unusable: node too old]"); } else if (apiKey == ApiKeys.DELETE_TOPICS) { bld.append("DeleteTopics(20): 10000 to 10001 [unusable: node too new]"); } else { bld.append(apiKey.name).append("("). append(apiKey.id).append("): "); if (apiKey.oldestVersion() == apiKey.latestVersion()) { bld.append(apiKey.oldestVersion()); } else { bld.append(apiKey.oldestVersion()). append(" to "). append(apiKey.latestVersion()); } bld.append(" [usable: ").append(apiKey.latestVersion()). append("]"); } prefix = ", "; } bld.append(")"); assertEquals(bld.toString(), versions.toString()); }
@Override public String toString() { return toString(false); }
NodeApiVersions { @Override public String toString() { return toString(false); } }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
NodeApiVersions { @Override public String toString() { return toString(false); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
@Test public void testUsableVersionCalculation() { List<ApiVersion> versionList = new ArrayList<>(); versionList.add(new ApiVersion(ApiKeys.CONTROLLED_SHUTDOWN_KEY.id, (short) 0, (short) 0)); versionList.add(new ApiVersion(ApiKeys.FETCH.id, (short) 1, (short) 2)); NodeApiVersions versions = new NodeApiVersions(versionList); try { versions.usableVersion(ApiKeys.CONTROLLED_SHUTDOWN_KEY); Assert.fail("expected UnsupportedVersionException"); } catch (UnsupportedVersionException e) { } assertEquals(2, versions.usableVersion(ApiKeys.FETCH)); }
public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
@Test(expected = UnsupportedVersionException.class) public void testUsableVersionCalculationNoKnownVersions() { List<ApiVersion> versionList = new ArrayList<>(); NodeApiVersions versions = new NodeApiVersions(versionList); versions.usableVersion(ApiKeys.FETCH); }
public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
@Test public void dateToConnectWithDefaultValue() { java.util.Date reference = new java.util.Date(0); Schema schema = Date.builder().defaultValue(reference).schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1, \"default\": 0 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
@Test public void testUsableVersionLatestVersions() { List<ApiVersion> versionList = new LinkedList<>(); for (ApiVersion apiVersion: ApiVersionsResponse.API_VERSIONS_RESPONSE.apiVersions()) { versionList.add(apiVersion); } versionList.add(new ApiVersion((short) 100, (short) 0, (short) 1)); NodeApiVersions versions = new NodeApiVersions(versionList); for (ApiKeys apiKey: ApiKeys.values()) { assertEquals(apiKey.latestVersion(), versions.usableVersion(apiKey)); } }
public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
NodeApiVersions { public short usableVersion(ApiKeys apiKey) { return usableVersion(apiKey, null); } NodeApiVersions(Collection<ApiVersion> nodeApiVersions); static NodeApiVersions create(); static NodeApiVersions create(Collection<ApiVersion> overrides); short usableVersion(ApiKeys apiKey); short usableVersion(ApiKeys apiKey, Short desiredVersion); @Override String toString(); String toString(boolean lineBreaks); ApiVersion apiVersion(ApiKeys apiKey); }
@Test public void testDeserializerToPropertyConfig() { Properties properties = new Properties(); properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClassName); properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClassName); Properties newProperties = ConsumerConfig.addDeserializerToConfig(properties, null, null); assertEquals(newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClassName); assertEquals(newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClassName); properties.clear(); properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClassName); newProperties = ConsumerConfig.addDeserializerToConfig(properties, keyDeserializer, null); assertEquals(newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClassName); assertEquals(newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClassName); properties.clear(); properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClassName); newProperties = ConsumerConfig.addDeserializerToConfig(properties, null, valueDeserializer); assertEquals(newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClassName); assertEquals(newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClassName); properties.clear(); newProperties = ConsumerConfig.addDeserializerToConfig(properties, keyDeserializer, valueDeserializer); assertEquals(newProperties.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClassName); assertEquals(newProperties.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClassName); }
public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; }
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } }
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } ConsumerConfig(Map<?, ?> props); ConsumerConfig(Map<?, ?> props, boolean doLog); }
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } ConsumerConfig(Map<?, ?> props); ConsumerConfig(Map<?, ?> props, boolean doLog); static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Properties addDeserializerToConfig(Properties properties, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Set<String> configNames(); static void main(String[] args); }
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } ConsumerConfig(Map<?, ?> props); ConsumerConfig(Map<?, ?> props, boolean doLog); static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Properties addDeserializerToConfig(Properties properties, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Set<String> configNames(); static void main(String[] args); static final String GROUP_ID_CONFIG; static final String MAX_POLL_RECORDS_CONFIG; static final String MAX_POLL_INTERVAL_MS_CONFIG; static final String SESSION_TIMEOUT_MS_CONFIG; static final String HEARTBEAT_INTERVAL_MS_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String ENABLE_AUTO_COMMIT_CONFIG; static final String AUTO_COMMIT_INTERVAL_MS_CONFIG; static final String PARTITION_ASSIGNMENT_STRATEGY_CONFIG; static final String AUTO_OFFSET_RESET_CONFIG; static final String AUTO_OFFSET_RESET_DOC; static final String FETCH_MIN_BYTES_CONFIG; static final String FETCH_MAX_BYTES_CONFIG; static final int DEFAULT_FETCH_MAX_BYTES; static final String FETCH_MAX_WAIT_MS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String MAX_PARTITION_FETCH_BYTES_CONFIG; static final int DEFAULT_MAX_PARTITION_FETCH_BYTES; static final String SEND_BUFFER_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String CLIENT_ID_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String CHECK_CRCS_CONFIG; static final String KEY_DESERIALIZER_CLASS_CONFIG; static final String KEY_DESERIALIZER_CLASS_DOC; static final String VALUE_DESERIALIZER_CLASS_CONFIG; static final String VALUE_DESERIALIZER_CLASS_DOC; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String INTERCEPTOR_CLASSES_CONFIG; static final String INTERCEPTOR_CLASSES_DOC; static final String EXCLUDE_INTERNAL_TOPICS_CONFIG; static final boolean DEFAULT_EXCLUDE_INTERNAL_TOPICS; static final String ISOLATION_LEVEL_CONFIG; static final String ISOLATION_LEVEL_DOC; static final String DEFAULT_ISOLATION_LEVEL; }
@Test public void testDeserializerToMapConfig() { Map<String, Object> configs = new HashMap<>(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); Map<String, Object> newConfigs = ConsumerConfig.addDeserializerToConfig(configs, null, null); assertEquals(newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClass); assertEquals(newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClass); configs.clear(); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); newConfigs = ConsumerConfig.addDeserializerToConfig(configs, keyDeserializer, null); assertEquals(newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClass); assertEquals(newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClass); configs.clear(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); newConfigs = ConsumerConfig.addDeserializerToConfig(configs, null, valueDeserializer); assertEquals(newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClass); assertEquals(newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClass); configs.clear(); newConfigs = ConsumerConfig.addDeserializerToConfig(configs, keyDeserializer, valueDeserializer); assertEquals(newConfigs.get(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG), keyDeserializerClass); assertEquals(newConfigs.get(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG), valueDeserializerClass); }
public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; }
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } }
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } ConsumerConfig(Map<?, ?> props); ConsumerConfig(Map<?, ?> props, boolean doLog); }
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } ConsumerConfig(Map<?, ?> props); ConsumerConfig(Map<?, ?> props, boolean doLog); static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Properties addDeserializerToConfig(Properties properties, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Set<String> configNames(); static void main(String[] args); }
ConsumerConfig extends AbstractConfig { public static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer) { Map<String, Object> newConfigs = new HashMap<String, Object>(); newConfigs.putAll(configs); if (keyDeserializer != null) newConfigs.put(KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializer.getClass()); if (valueDeserializer != null) newConfigs.put(VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); return newConfigs; } ConsumerConfig(Map<?, ?> props); ConsumerConfig(Map<?, ?> props, boolean doLog); static Map<String, Object> addDeserializerToConfig(Map<String, Object> configs, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Properties addDeserializerToConfig(Properties properties, Deserializer<?> keyDeserializer, Deserializer<?> valueDeserializer); static Set<String> configNames(); static void main(String[] args); static final String GROUP_ID_CONFIG; static final String MAX_POLL_RECORDS_CONFIG; static final String MAX_POLL_INTERVAL_MS_CONFIG; static final String SESSION_TIMEOUT_MS_CONFIG; static final String HEARTBEAT_INTERVAL_MS_CONFIG; static final String BOOTSTRAP_SERVERS_CONFIG; static final String ENABLE_AUTO_COMMIT_CONFIG; static final String AUTO_COMMIT_INTERVAL_MS_CONFIG; static final String PARTITION_ASSIGNMENT_STRATEGY_CONFIG; static final String AUTO_OFFSET_RESET_CONFIG; static final String AUTO_OFFSET_RESET_DOC; static final String FETCH_MIN_BYTES_CONFIG; static final String FETCH_MAX_BYTES_CONFIG; static final int DEFAULT_FETCH_MAX_BYTES; static final String FETCH_MAX_WAIT_MS_CONFIG; static final String METADATA_MAX_AGE_CONFIG; static final String MAX_PARTITION_FETCH_BYTES_CONFIG; static final int DEFAULT_MAX_PARTITION_FETCH_BYTES; static final String SEND_BUFFER_CONFIG; static final String RECEIVE_BUFFER_CONFIG; static final String CLIENT_ID_CONFIG; static final String RECONNECT_BACKOFF_MS_CONFIG; static final String RECONNECT_BACKOFF_MAX_MS_CONFIG; static final String RETRY_BACKOFF_MS_CONFIG; static final String METRICS_SAMPLE_WINDOW_MS_CONFIG; static final String METRICS_NUM_SAMPLES_CONFIG; static final String METRICS_RECORDING_LEVEL_CONFIG; static final String METRIC_REPORTER_CLASSES_CONFIG; static final String CHECK_CRCS_CONFIG; static final String KEY_DESERIALIZER_CLASS_CONFIG; static final String KEY_DESERIALIZER_CLASS_DOC; static final String VALUE_DESERIALIZER_CLASS_CONFIG; static final String VALUE_DESERIALIZER_CLASS_DOC; static final String CONNECTIONS_MAX_IDLE_MS_CONFIG; static final String REQUEST_TIMEOUT_MS_CONFIG; static final String INTERCEPTOR_CLASSES_CONFIG; static final String INTERCEPTOR_CLASSES_DOC; static final String EXCLUDE_INTERNAL_TOPICS_CONFIG; static final boolean DEFAULT_EXCLUDE_INTERNAL_TOPICS; static final String ISOLATION_LEVEL_CONFIG; static final String ISOLATION_LEVEL_DOC; static final String DEFAULT_ISOLATION_LEVEL; }
@Test public void testOneConsumerNoTopic() { String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); Map<String, Subscription> subscriptions = Collections.singletonMap(consumerId, new Subscription(Collections.<String>emptyList())); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertTrue(assignment.get(consumerId).isEmpty()); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
@Test public void testOneConsumerNonexistentTopic() { String topic = "topic"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 0); Map<String, Subscription> subscriptions = Collections.singletonMap(consumerId, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertTrue(assignment.get(consumerId).isEmpty()); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
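The row above pairs testOneConsumerNonexistentTopic with StickyAssignor.assign(). A minimal sketch of that scenario outside the JUnit harness, assuming the pre-2.4 Kafka clients API these rows appear to target (the internal PartitionAssignor.Subscription type and the public assign(Map<String, Integer>, Map<String, Subscription>) overload); the StickyAssignorSketch class name is illustrative only:

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.StickyAssignor;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription;
import org.apache.kafka.common.TopicPartition;

public class StickyAssignorSketch {
    public static void main(String[] args) {
        StickyAssignor assignor = new StickyAssignor();

        // "topic" is registered with zero partitions, mirroring the nonexistent-topic test:
        // the per-partition loop in assign() never runs, so nothing can be handed out.
        Map<String, Integer> partitionsPerTopic = new HashMap<>();
        partitionsPerTopic.put("topic", 0);

        // A single consumer subscribed to that topic.
        Map<String, Subscription> subscriptions = Collections.singletonMap(
                "consumer", new Subscription(Collections.singletonList("topic")));

        Map<String, List<TopicPartition>> assignment =
                assignor.assign(partitionsPerTopic, subscriptions);

        // Per the paired test, the consumer still appears in the result, with an empty list.
        System.out.println(assignment); // expected: {consumer=[]}
    }
}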
@Test public void testOneConsumerOneTopic() { String topic = "topic"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 3); Map<String, Subscription> subscriptions = Collections.singletonMap(consumerId, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId)); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
@Test public void testOnlyAssignsPartitionsFromSubscribedTopics() { String topic = "topic"; String otherTopic = "other"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 3); partitionsPerTopic.put(otherTopic, 3); Map<String, Subscription> subscriptions = Collections.singletonMap(consumerId, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(partitions(tp(topic, 0), tp(topic, 1), tp(topic, 2)), assignment.get(consumerId)); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
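The row ending here (target testOnlyAssignsPartitionsFromSubscribedTopics) exercises the branch of assign() that never gives a consumer partitions of topics it is not subscribed to, even when those partitions exist in the cluster metadata. A hedged sketch under the same API assumptions as the sketch above; the SubscribedTopicsOnlySketch class name is illustrative only:

import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.kafka.clients.consumer.StickyAssignor;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription;
import org.apache.kafka.common.TopicPartition;

public class SubscribedTopicsOnlySketch {
    public static void main(String[] args) {
        StickyAssignor assignor = new StickyAssignor();

        // Two topics exist with three partitions each...
        Map<String, Integer> partitionsPerTopic = new HashMap<>();
        partitionsPerTopic.put("topic", 3);
        partitionsPerTopic.put("other", 3);

        // ...but the single consumer subscribes only to "topic".
        Map<String, Subscription> subscriptions = Collections.singletonMap(
                "consumer", new Subscription(Collections.singletonList("topic")));

        Map<String, List<TopicPartition>> assignment =
                assignor.assign(partitionsPerTopic, subscriptions);

        // Per the paired test: topic-0, topic-1, topic-2 only; nothing from "other".
        System.out.println(assignment.get("consumer"));
    }
}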
@Test public void testOneConsumerMultipleTopics() { String topic1 = "topic1"; String topic2 = "topic2"; String consumerId = "consumer"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 1); partitionsPerTopic.put(topic2, 2); Map<String, Subscription> subscriptions = Collections.singletonMap(consumerId, new Subscription(topics(topic1, topic2))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(partitions(tp(topic1, 0), tp(topic2, 0), tp(topic2, 1)), assignment.get(consumerId)); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
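The row above pairs the testOneConsumerMultipleTopics test with its focal method StickyAssignor.assign. As a reading aid only, here is a minimal sketch of driving that focal method directly, assuming the Kafka clients API reproduced in this dump (StickyAssignor.assign taking per-topic partition counts and PartitionAssignor.Subscription objects); the class name StickyAssignorSketch and the topic names are illustrative:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.StickyAssignor;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription;
import org.apache.kafka.common.TopicPartition;

public class StickyAssignorSketch {
    public static void main(String[] args) {
        StickyAssignor assignor = new StickyAssignor();
        // topic1 has one partition, topic2 has two, mirroring the test above.
        Map<String, Integer> partitionsPerTopic = new HashMap<>();
        partitionsPerTopic.put("topic1", 1);
        partitionsPerTopic.put("topic2", 2);
        // A single consumer subscribed to both topics.
        Map<String, Subscription> subscriptions = Collections.singletonMap(
                "consumer", new Subscription(Arrays.asList("topic1", "topic2")));
        Map<String, List<TopicPartition>> assignment =
                assignor.assign(partitionsPerTopic, subscriptions);
        // With no competing consumers, "consumer" receives all three partitions.
        System.out.println(assignment);
    }
}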
@Test public void testTwoConsumersOneTopicOnePartition() { String topic = "topic"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 1); Map<String, Subscription> subscriptions = new HashMap<>(); subscriptions.put(consumer1, new Subscription(topics(topic))); subscriptions.put(consumer2, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(partitions(tp(topic, 0)), assignment.get(consumer1)); assertEquals(Collections.<TopicPartition>emptyList(), assignment.get(consumer2)); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
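For the testTwoConsumersOneTopicOnePartition row above: when there are more consumers than partitions, assign still returns an entry for every subscriber, so the consumer that receives nothing maps to an empty list rather than being dropped from the result. A short fragment under the same assumptions and imports as the earlier StickyAssignor sketch:

Map<String, Integer> onePartition = Collections.singletonMap("topic", 1);
Map<String, Subscription> subs = new HashMap<>();
subs.put("consumer1", new Subscription(Collections.singletonList("topic")));
subs.put("consumer2", new Subscription(Collections.singletonList("topic")));
Map<String, List<TopicPartition>> result = new StickyAssignor().assign(onePartition, subs);
// One consumer owns topic-0; the other maps to an empty list, and both keys are present.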
@Test public void dateToConnectOptionalWithDefaultValue() { java.util.Date reference = new java.util.Date(0); Schema schema = Date.builder().optional().defaultValue(reference).schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1, \"optional\": true, \"default\": 0 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
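The JsonConverter row above exercises toConnectData with a schema-carrying JSON envelope. A minimal standalone sketch, assuming the org.apache.kafka.connect.json.JsonConverter API shown in this dump and its "schemas.enable" configuration key; the topic name and class name JsonConverterSketch are arbitrary:

import java.util.Collections;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

public class JsonConverterSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // Configure as a value converter; with schemas enabled the envelope
        // must contain exactly the "schema" and "payload" fields.
        converter.configure(Collections.singletonMap("schemas.enable", true), false);
        String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Date\", \"version\": 1 }, \"payload\": 10000 }";
        SchemaAndValue schemaAndValue = converter.toConnectData("topic", msg.getBytes());
        // schema() is the Connect Date logical type; value() is a java.util.Date
        // 10000 days after the Unix epoch (UTC), as in the dateToConnect test.
        System.out.println(schemaAndValue.value());
    }
}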
@Test public void testTwoConsumersOneTopicTwoPartitions() { String topic = "topic"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic, 2); Map<String, Subscription> subscriptions = new HashMap<>(); subscriptions.put(consumer1, new Subscription(topics(topic))); subscriptions.put(consumer2, new Subscription(topics(topic))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(partitions(tp(topic, 0)), assignment.get(consumer1)); assertEquals(partitions(tp(topic, 1)), assignment.get(consumer2)); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
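The tests in these rows close with verifyValidityAndBalance(...) and isFullyBalanced(...), whose bodies are not included in this dump. A hypothetical equivalent of the balance check, under the common definition that per-consumer partition counts may differ by at most one:

// Hypothetical stand-in for the isFullyBalanced(...) helper referenced by the tests;
// the real implementation in the test class is not part of this dump.
static boolean isFullyBalanced(Map<String, List<TopicPartition>> assignment) {
    int min = Integer.MAX_VALUE;
    int max = 0;
    for (List<TopicPartition> partitions : assignment.values()) {
        min = Math.min(min, partitions.size());
        max = Math.max(max, partitions.size());
    }
    return max - min <= 1;
}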
@Test public void testMultipleConsumersMixedTopicSubscriptions() { String topic1 = "topic1"; String topic2 = "topic2"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; String consumer3 = "consumer3"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 3); partitionsPerTopic.put(topic2, 2); Map<String, Subscription> subscriptions = new HashMap<>(); subscriptions.put(consumer1, new Subscription(topics(topic1))); subscriptions.put(consumer2, new Subscription(topics(topic1, topic2))); subscriptions.put(consumer3, new Subscription(topics(topic1))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(partitions(tp(topic1, 0), tp(topic1, 2)), assignment.get(consumer1)); assertEquals(partitions(tp(topic2, 0), tp(topic2, 1)), assignment.get(consumer2)); assertEquals(partitions(tp(topic1, 1)), assignment.get(consumer3)); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
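The assignment tests above lean on helpers such as verifyValidityAndBalance and isFullyBalanced that are not reproduced in this section. A minimal sketch of what the balance side of such a check might look like, assuming the "at most one partition difference between consumers" notion of fully balanced implied by the tests; this is an illustration, not the test class's actual helper:

import java.util.List;
import java.util.Map;
import org.apache.kafka.common.TopicPartition;

final class AssignmentChecks {
    // A group is treated as fully balanced when the largest and smallest
    // per-consumer assignments differ by at most one partition.
    static boolean isFullyBalanced(Map<String, List<TopicPartition>> assignment) {
        int min = Integer.MAX_VALUE;
        int max = 0;
        for (List<TopicPartition> partitions : assignment.values()) {
            min = Math.min(min, partitions.size());
            max = Math.max(max, partitions.size());
        }
        return max - min <= 1;
    }
}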
@Test public void testTwoConsumersTwoTopicsSixPartitions() { String topic1 = "topic1"; String topic2 = "topic2"; String consumer1 = "consumer1"; String consumer2 = "consumer2"; Map<String, Integer> partitionsPerTopic = new HashMap<>(); partitionsPerTopic.put(topic1, 3); partitionsPerTopic.put(topic2, 3); Map<String, Subscription> subscriptions = new HashMap<>(); subscriptions.put(consumer1, new Subscription(topics(topic1, topic2))); subscriptions.put(consumer2, new Subscription(topics(topic1, topic2))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); assertEquals(partitions(tp(topic1, 0), tp(topic1, 2), tp(topic2, 1)), assignment.get(consumer1)); assertEquals(partitions(tp(topic1, 1), tp(topic2, 0), tp(topic2, 2)), assignment.get(consumer2)); verifyValidityAndBalance(subscriptions, assignment); assertTrue(isFullyBalanced(assignment)); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
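The focal assign method can also be exercised directly, outside JUnit, the same way the tests drive it. A small usage sketch, assuming org.apache.kafka.clients.consumer.StickyAssignor and the internal PartitionAssignor.Subscription type of this Kafka version are on the classpath; the topic and consumer names are placeholders:

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.kafka.clients.consumer.StickyAssignor;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor.Subscription;
import org.apache.kafka.common.TopicPartition;

public class StickyAssignDemo {
    public static void main(String[] args) {
        StickyAssignor assignor = new StickyAssignor();

        // Three partitions of topic1 and three of topic2, two consumers subscribed to both,
        // mirroring testTwoConsumersTwoTopicsSixPartitions.
        Map<String, Integer> partitionsPerTopic = new HashMap<>();
        partitionsPerTopic.put("topic1", 3);
        partitionsPerTopic.put("topic2", 3);

        Map<String, Subscription> subscriptions = new HashMap<>();
        subscriptions.put("consumer1", new Subscription(Arrays.asList("topic1", "topic2")));
        subscriptions.put("consumer2", new Subscription(Arrays.asList("topic1", "topic2")));

        Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions);
        assignment.forEach((consumer, partitions) ->
                System.out.println(consumer + " -> " + partitions));
    }
}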
@Test public void testPoorRoundRobinAssignmentScenario() { Map<String, Integer> partitionsPerTopic = new HashMap<>(); for (int i = 1; i <= 5; i++) partitionsPerTopic.put(String.format("topic%d", i), (i % 2) + 1); Map<String, Subscription> subscriptions = new HashMap<>(); subscriptions.put("consumer1", new Subscription(topics("topic1", "topic2", "topic3", "topic4", "topic5"))); subscriptions.put("consumer2", new Subscription(topics("topic1", "topic3", "topic5"))); subscriptions.put("consumer3", new Subscription(topics("topic1", "topic3", "topic5"))); subscriptions.put("consumer4", new Subscription(topics("topic1", "topic2", "topic3", "topic4", "topic5"))); Map<String, List<TopicPartition>> assignment = assignor.assign(partitionsPerTopic, subscriptions); verifyValidityAndBalance(subscriptions, assignment); }
public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
StickyAssignor extends AbstractPartitionAssignor { public Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions) { Map<String, List<TopicPartition>> currentAssignment = new HashMap<>(); partitionMovements = new PartitionMovements(); prepopulateCurrentAssignments(subscriptions, currentAssignment); boolean isFreshAssignment = currentAssignment.isEmpty(); final Map<TopicPartition, List<String>> partition2AllPotentialConsumers = new HashMap<>(); final Map<String, List<TopicPartition>> consumer2AllPotentialPartitions = new HashMap<>(); for (Entry<String, Integer> entry: partitionsPerTopic.entrySet()) { for (int i = 0; i < entry.getValue(); ++i) partition2AllPotentialConsumers.put(new TopicPartition(entry.getKey(), i), new ArrayList<String>()); } for (Entry<String, Subscription> entry: subscriptions.entrySet()) { String consumer = entry.getKey(); consumer2AllPotentialPartitions.put(consumer, new ArrayList<TopicPartition>()); for (String topic: entry.getValue().topics()) { for (int i = 0; i < partitionsPerTopic.get(topic); ++i) { TopicPartition topicPartition = new TopicPartition(topic, i); consumer2AllPotentialPartitions.get(consumer).add(topicPartition); partition2AllPotentialConsumers.get(topicPartition).add(consumer); } } if (!currentAssignment.containsKey(consumer)) currentAssignment.put(consumer, new ArrayList<TopicPartition>()); } Map<TopicPartition, String> currentPartitionConsumer = new HashMap<>(); for (Map.Entry<String, List<TopicPartition>> entry: currentAssignment.entrySet()) for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.put(topicPartition, entry.getKey()); List<TopicPartition> sortedPartitions = sortPartitions( currentAssignment, isFreshAssignment, partition2AllPotentialConsumers, consumer2AllPotentialPartitions); List<TopicPartition> unassignedPartitions = new ArrayList<>(sortedPartitions); for (Iterator<Map.Entry<String, List<TopicPartition>>> it = currentAssignment.entrySet().iterator(); it.hasNext();) { Map.Entry<String, List<TopicPartition>> entry = it.next(); if (!subscriptions.containsKey(entry.getKey())) { for (TopicPartition topicPartition: entry.getValue()) currentPartitionConsumer.remove(topicPartition); it.remove(); } else { for (Iterator<TopicPartition> partitionIter = entry.getValue().iterator(); partitionIter.hasNext();) { TopicPartition partition = partitionIter.next(); if (!partition2AllPotentialConsumers.containsKey(partition)) { partitionIter.remove(); currentPartitionConsumer.remove(partition); } else if (!subscriptions.get(entry.getKey()).topics().contains(partition.topic())) { partitionIter.remove(); } else unassignedPartitions.remove(partition); } } } TreeSet<String> sortedCurrentSubscriptions = new TreeSet<>(new SubscriptionComparator(currentAssignment)); sortedCurrentSubscriptions.addAll(currentAssignment.keySet()); balance(currentAssignment, sortedPartitions, unassignedPartitions, sortedCurrentSubscriptions, consumer2AllPotentialPartitions, partition2AllPotentialConsumers, currentPartitionConsumer); return currentAssignment; } Map<String, List<TopicPartition>> assign(Map<String, Integer> partitionsPerTopic, Map<String, Subscription> subscriptions); @Override void onAssignment(Assignment assignment); @Override Subscription subscription(Set<String> topics); @Override String name(); }
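testPoorRoundRobinAssignmentScenario only asserts validity and balance, so a companion sketch of the validity side may help: every assigned partition must come from a topic the consumer actually subscribed to, and no partition may be handed to two consumers. This is a hypothetical check in the spirit of verifyValidityAndBalance, not the test class's real helper:

import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.kafka.common.TopicPartition;

final class ValidityCheck {
    // subscribedTopics: consumer -> topics it subscribed to; assignment: consumer -> assigned partitions
    static boolean isValid(Map<String, List<String>> subscribedTopics,
                           Map<String, List<TopicPartition>> assignment) {
        Set<TopicPartition> seen = new HashSet<>();
        for (Map.Entry<String, List<TopicPartition>> entry : assignment.entrySet()) {
            List<String> topics = subscribedTopics.get(entry.getKey());
            for (TopicPartition tp : entry.getValue()) {
                // each partition must belong to a subscribed topic and appear exactly once overall
                if (topics == null || !topics.contains(tp.topic()) || !seen.add(tp))
                    return false;
            }
        }
        return true;
    }
}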
@Test public void testFetchRequestWhenRecordTooLarge() { try { client.setNodeApiVersions(NodeApiVersions.create(Collections.singletonList( new ApiVersionsResponse.ApiVersion(ApiKeys.FETCH.id, (short) 2, (short) 2)))); makeFetchRequestWithIncompleteRecord(); try { fetcher.fetchedRecords(); fail("RecordTooLargeException should have been raised"); } catch (RecordTooLargeException e) { assertTrue(e.getMessage().startsWith("There are some messages at [Partition=Offset]: ")); assertEquals(0, subscriptions.position(tp1).longValue()); } } finally { client.setNodeApiVersions(NodeApiVersions.create()); } }
public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
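fetchedRecords is an internal method that KafkaConsumer.poll drains; application code never calls it directly. A minimal public-API sketch, assuming a broker at localhost:9092 and a topic named topic1 (both placeholders), showing how max.poll.records caps each returned batch, which is the maxPollRecords bound the loop above enforces:

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class PollLoopSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // placeholder broker
        props.put("group.id", "demo-group");                 // placeholder group id
        props.put("max.poll.records", "100");                 // per-poll cap applied by the fetcher
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("topic1"));
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                for (ConsumerRecord<String, String> record : records)
                    System.out.printf("%s-%d@%d: %s%n", record.topic(), record.partition(),
                            record.offset(), record.value());
            }
        }
    }
}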
@Test public void testFetchRequestInternalError() { makeFetchRequestWithIncompleteRecord(); try { fetcher.fetchedRecords(); fail("KafkaException should have been raised"); } catch (KafkaException e) { assertTrue(e.getMessage().startsWith("Failed to make progress reading messages")); assertEquals(0, subscriptions.position(tp1).longValue()); } }
public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords() { Map<TopicPartition, List<ConsumerRecord<K, V>>> fetched = new HashMap<>(); int recordsRemaining = maxPollRecords; try { while (recordsRemaining > 0) { if (nextInLineRecords == null || nextInLineRecords.isFetched) { CompletedFetch completedFetch = completedFetches.peek(); if (completedFetch == null) break; nextInLineRecords = parseCompletedFetch(completedFetch); completedFetches.poll(); } else { List<ConsumerRecord<K, V>> records = fetchRecords(nextInLineRecords, recordsRemaining); TopicPartition partition = nextInLineRecords.partition; if (!records.isEmpty()) { List<ConsumerRecord<K, V>> currentRecords = fetched.get(partition); if (currentRecords == null) { fetched.put(partition, records); } else { List<ConsumerRecord<K, V>> newRecords = new ArrayList<>(records.size() + currentRecords.size()); newRecords.addAll(currentRecords); newRecords.addAll(records); fetched.put(partition, newRecords); } recordsRemaining -= records.size(); } } } } catch (KafkaException e) { if (fetched.isEmpty()) throw e; } return fetched; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
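The record above pairs a fetch-error test with Fetcher.fetchedRecords(), which caps each call at maxPollRecords and only rethrows a KafkaException when no records were accumulated. A minimal consumer-side sketch of the same contract follows; it is not part of the source under test, and the broker address, group id, and topic name are assumptions.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.KafkaException;

public class BoundedPollExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // assumed broker address
        props.put("group.id", "bounded-poll-demo");          // assumed group id
        props.put("max.poll.records", "500");                 // the bound enforced by fetchedRecords()
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic"));
            while (true) {
                try {
                    // Each poll() returns at most max.poll.records records across all partitions.
                    ConsumerRecords<byte[], byte[]> records = consumer.poll(100);
                    for (ConsumerRecord<byte[], byte[]> record : records)
                        System.out.printf("%s-%d@%d%n", record.topic(), record.partition(), record.offset());
                } catch (KafkaException e) {
                    // Surfaces only when the fetcher could not make any progress, e.g. the
                    // "Failed to make progress reading messages" case in the test above.
                    System.err.println("fetch failed: " + e.getMessage());
                    break;
                }
            }
        }
    }
}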
@Test public void testFetchOnPausedPartition() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); subscriptions.pause(tp1); assertFalse(fetcher.sendFetches() > 0); assertTrue(client.requests().isEmpty()); }
public int sendFetches() { Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests(); for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) { final FetchRequest.Builder request = fetchEntry.getValue(); final Node fetchTarget = fetchEntry.getKey(); log.debug("Sending {} fetch for partitions {} to broker {}", isolationLevel, request.fetchData().keySet(), fetchTarget); client.send(fetchTarget, request) .addListener(new RequestFutureListener<ClientResponse>() { @Override public void onSuccess(ClientResponse resp) { FetchResponse response = (FetchResponse) resp.responseBody(); if (!matchesRequestedPartitions(request, response)) { log.warn("Ignoring fetch response containing partitions {} since it does not match " + "the requested partitions {}", response.responseData().keySet(), request.fetchData().keySet()); return; } Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet()); FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions); for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) { TopicPartition partition = entry.getKey(); long fetchOffset = request.fetchData().get(partition).fetchOffset; FetchResponse.PartitionData fetchData = entry.getValue(); log.debug("Fetch {} at offset {} for partition {} returned fetch data {}", isolationLevel, fetchOffset, partition, fetchData); completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData, metricAggregator, resp.requestHeader().apiVersion())); } sensors.fetchLatency.record(resp.requestLatencyMs()); } @Override public void onFailure(RuntimeException e) { log.debug("Fetch request {} to {} failed", request.fetchData(), fetchTarget, e); } }); } return fetchRequestMap.size(); }
Fetcher implements SubscriptionState.Listener, Closeable { public int sendFetches() { Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests(); for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) { final FetchRequest.Builder request = fetchEntry.getValue(); final Node fetchTarget = fetchEntry.getKey(); log.debug("Sending {} fetch for partitions {} to broker {}", isolationLevel, request.fetchData().keySet(), fetchTarget); client.send(fetchTarget, request) .addListener(new RequestFutureListener<ClientResponse>() { @Override public void onSuccess(ClientResponse resp) { FetchResponse response = (FetchResponse) resp.responseBody(); if (!matchesRequestedPartitions(request, response)) { log.warn("Ignoring fetch response containing partitions {} since it does not match " + "the requested partitions {}", response.responseData().keySet(), request.fetchData().keySet()); return; } Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet()); FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions); for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) { TopicPartition partition = entry.getKey(); long fetchOffset = request.fetchData().get(partition).fetchOffset; FetchResponse.PartitionData fetchData = entry.getValue(); log.debug("Fetch {} at offset {} for partition {} returned fetch data {}", isolationLevel, fetchOffset, partition, fetchData); completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData, metricAggregator, resp.requestHeader().apiVersion())); } sensors.fetchLatency.record(resp.requestLatencyMs()); } @Override public void onFailure(RuntimeException e) { log.debug("Fetch request {} to {} failed", request.fetchData(), fetchTarget, e); } }); } return fetchRequestMap.size(); } }
Fetcher implements SubscriptionState.Listener, Closeable { public int sendFetches() { Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests(); for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) { final FetchRequest.Builder request = fetchEntry.getValue(); final Node fetchTarget = fetchEntry.getKey(); log.debug("Sending {} fetch for partitions {} to broker {}", isolationLevel, request.fetchData().keySet(), fetchTarget); client.send(fetchTarget, request) .addListener(new RequestFutureListener<ClientResponse>() { @Override public void onSuccess(ClientResponse resp) { FetchResponse response = (FetchResponse) resp.responseBody(); if (!matchesRequestedPartitions(request, response)) { log.warn("Ignoring fetch response containing partitions {} since it does not match " + "the requested partitions {}", response.responseData().keySet(), request.fetchData().keySet()); return; } Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet()); FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions); for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) { TopicPartition partition = entry.getKey(); long fetchOffset = request.fetchData().get(partition).fetchOffset; FetchResponse.PartitionData fetchData = entry.getValue(); log.debug("Fetch {} at offset {} for partition {} returned fetch data {}", isolationLevel, fetchOffset, partition, fetchData); completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData, metricAggregator, resp.requestHeader().apiVersion())); } sensors.fetchLatency.record(resp.requestLatencyMs()); } @Override public void onFailure(RuntimeException e) { log.debug("Fetch request {} to {} failed", request.fetchData(), fetchTarget, e); } }); } return fetchRequestMap.size(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public int sendFetches() { Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests(); for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) { final FetchRequest.Builder request = fetchEntry.getValue(); final Node fetchTarget = fetchEntry.getKey(); log.debug("Sending {} fetch for partitions {} to broker {}", isolationLevel, request.fetchData().keySet(), fetchTarget); client.send(fetchTarget, request) .addListener(new RequestFutureListener<ClientResponse>() { @Override public void onSuccess(ClientResponse resp) { FetchResponse response = (FetchResponse) resp.responseBody(); if (!matchesRequestedPartitions(request, response)) { log.warn("Ignoring fetch response containing partitions {} since it does not match " + "the requested partitions {}", response.responseData().keySet(), request.fetchData().keySet()); return; } Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet()); FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions); for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) { TopicPartition partition = entry.getKey(); long fetchOffset = request.fetchData().get(partition).fetchOffset; FetchResponse.PartitionData fetchData = entry.getValue(); log.debug("Fetch {} at offset {} for partition {} returned fetch data {}", isolationLevel, fetchOffset, partition, fetchData); completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData, metricAggregator, resp.requestHeader().apiVersion())); } sensors.fetchLatency.record(resp.requestLatencyMs()); } @Override public void onFailure(RuntimeException e) { log.debug("Fetch request {} to {} failed", request.fetchData(), fetchTarget, e); } }); } return fetchRequestMap.size(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public int sendFetches() { Map<Node, FetchRequest.Builder> fetchRequestMap = createFetchRequests(); for (Map.Entry<Node, FetchRequest.Builder> fetchEntry : fetchRequestMap.entrySet()) { final FetchRequest.Builder request = fetchEntry.getValue(); final Node fetchTarget = fetchEntry.getKey(); log.debug("Sending {} fetch for partitions {} to broker {}", isolationLevel, request.fetchData().keySet(), fetchTarget); client.send(fetchTarget, request) .addListener(new RequestFutureListener<ClientResponse>() { @Override public void onSuccess(ClientResponse resp) { FetchResponse response = (FetchResponse) resp.responseBody(); if (!matchesRequestedPartitions(request, response)) { log.warn("Ignoring fetch response containing partitions {} since it does not match " + "the requested partitions {}", response.responseData().keySet(), request.fetchData().keySet()); return; } Set<TopicPartition> partitions = new HashSet<>(response.responseData().keySet()); FetchResponseMetricAggregator metricAggregator = new FetchResponseMetricAggregator(sensors, partitions); for (Map.Entry<TopicPartition, FetchResponse.PartitionData> entry : response.responseData().entrySet()) { TopicPartition partition = entry.getKey(); long fetchOffset = request.fetchData().get(partition).fetchOffset; FetchResponse.PartitionData fetchData = entry.getValue(); log.debug("Fetch {} at offset {} for partition {} returned fetch data {}", isolationLevel, fetchOffset, partition, fetchData); completedFetches.add(new CompletedFetch(partition, fetchOffset, fetchData, metricAggregator, resp.requestHeader().apiVersion())); } sensors.fetchLatency.record(resp.requestLatencyMs()); } @Override public void onFailure(RuntimeException e) { log.debug("Fetch request {} to {} failed", request.fetchData(), fetchTarget, e); } }); } return fetchRequestMap.size(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
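testFetchOnPausedPartition above checks that sendFetches() skips paused partitions entirely, so no fetch request is queued for them. The sketch below shows the equivalent flow control through the public consumer API; it is an illustration only, and the throttling scenario is an assumption.

import java.util.Collections;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PauseResumeExample {
    // While a partition is paused, the fetcher omits it from fetch requests, so poll()
    // keeps the group alive but returns no records for it until resume() is called.
    static void throttle(KafkaConsumer<byte[], byte[]> consumer, TopicPartition tp, boolean backlogged) {
        if (backlogged)
            consumer.pause(Collections.singleton(tp));
        else
            consumer.resume(Collections.singleton(tp));
    }
}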
@Test public void testUpdateFetchPositionsNoneCommittedNoResetStrategy() { Set<TopicPartition> tps = new HashSet<>(Arrays.asList(tp1, tp2)); subscriptionsNoAutoReset.assignFromUser(tps); try { fetcherNoAutoReset.updateFetchPositions(tps); fail("Should have thrown NoOffsetForPartitionException"); } catch (NoOffsetForPartitionException e) { Set<TopicPartition> partitions = e.partitions(); assertEquals(tps, partitions); } }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
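The test above uses a subscription state with no default reset strategy, so updateFetchPositions() has nowhere to fall back to and throws NoOffsetForPartitionException listing the affected partitions. A sketch of how an application handles that contract follows; configuration values and the topic name are assumptions.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.NoOffsetForPartitionException;

public class NoResetStrategyExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // assumed broker address
        props.put("group.id", "no-reset-demo");              // assumed group id
        props.put("auto.offset.reset", "none");               // matches the subscriptionsNoAutoReset setup above
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("demo-topic"));
            try {
                consumer.poll(100);
            } catch (NoOffsetForPartitionException e) {
                // e.partitions() reports every partition that had neither a committed offset
                // nor a reset strategy; fall back to an explicit starting point.
                consumer.seekToBeginning(e.partitions());
            }
        }
    }
}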
@Test public void testUpdateFetchPositionToCommitted() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.committed(tp1, new OffsetAndMetadata(5)); fetcher.updateFetchPositions(singleton(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
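Here updateFetchPositions() takes the committed-offset branch: a committed offset of 5 already exists, so the fetcher seeks to it and the partition becomes fetchable at that position. A small illustrative helper with the same observable behaviour, under the assumption that the consumer group has already committed offset 5 for the partition:

import java.util.Collections;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class ResumeFromCommittedExample {
    // position() forces the same updateFetchPositions() path as the test: with a committed
    // offset of 5 stored for the group, the fetcher seeks there and this returns 5.
    static long startingPosition(KafkaConsumer<String, String> consumer, TopicPartition tp) {
        consumer.assign(Collections.singleton(tp));
        return consumer.position(tp);
    }
}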
@Test public void testUpdateFetchPositionResetToDefaultOffset() { subscriptions.assignFromUser(singleton(tp1)); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.EARLIEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
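This record covers the default-reset branch, where a ListOffsets request with the EARLIEST timestamp supplies the starting offset. The same ListOffsets machinery backs beginningOffsets(), endOffsets() and getOffsetsByTimes(); the sketch below uses the corresponding public consumer methods and is an illustration only, with the partition supplied by the caller.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;

public class ListOffsetsExample {
    static void printOffsets(KafkaConsumer<String, String> consumer, TopicPartition tp) {
        // Each of these calls resolves through a ListOffsets request, just like the offset reset above.
        Map<TopicPartition, Long> earliest = consumer.beginningOffsets(Collections.singleton(tp));
        Map<TopicPartition, Long> latest = consumer.endOffsets(Collections.singleton(tp));
        Map<TopicPartition, OffsetAndTimestamp> byTime =
                consumer.offsetsForTimes(Collections.singletonMap(tp, 0L));
        System.out.printf("%s earliest=%d latest=%d at-epoch-0=%s%n",
                tp, earliest.get(tp), latest.get(tp), byTime.get(tp));
    }
}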
@Test public void timeToConnect() { Schema schema = Time.SCHEMA; GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.MILLISECOND, 14400000); java.util.Date reference = calendar.getTime(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1 }, \"payload\": 14400000 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); java.util.Date converted = (java.util.Date) schemaAndValue.value(); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, converted); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
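timeToConnect above decodes an int32 envelope tagged with the org.apache.kafka.connect.data.Time logical type into a java.util.Date. The standalone sketch below runs the same envelope through a freshly configured JsonConverter; the configuration map and the topic name are assumptions.

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.Time;
import org.apache.kafka.connect.json.JsonConverter;

public class TimeEnvelopeExample {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap("schemas.enable", "true"), false);
        String envelope = "{ \"schema\": { \"type\": \"int32\", "
                + "\"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1 }, "
                + "\"payload\": 14400000 }";
        SchemaAndValue result = converter.toConnectData("demo-topic", envelope.getBytes(StandardCharsets.UTF_8));
        // The schema resolves to Time.SCHEMA and the payload to a java.util.Date 4 hours after midnight UTC.
        System.out.println(result.schema().equals(Time.SCHEMA) + " -> " + result.value());
    }
}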
@Test public void testUpdateFetchPositionResetToLatestOffset() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.LATEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
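This variant resets to LATEST, issuing a ListOffsets request with the LATEST timestamp. The closest public-API equivalent is seekToEnd(), sketched below under the assumption that the partition is already assigned to the consumer.

import java.util.Collections;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class SkipToLatestExample {
    static void skipBacklog(KafkaConsumer<String, String> consumer, TopicPartition tp) {
        // seekToEnd marks the partition for a reset to the log end; the next position()/poll()
        // issues the ListOffsets request and seeks to the returned offset.
        consumer.seekToEnd(Collections.singleton(tp));
        System.out.println("now at " + consumer.position(tp));
    }
}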
@Test public void testListOffsetsSendsIsolationLevel() { for (final IsolationLevel isolationLevel : IsolationLevel.values()) { Fetcher<byte[], byte[]> fetcher = createFetcher(subscriptions, new Metrics(), new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, isolationLevel); subscriptions.assignFromUser(singleton(tp1)); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.LATEST); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { ListOffsetRequest request = (ListOffsetRequest) body; return request.isolationLevel() == isolationLevel; } }, listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); } }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
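Note: the record above pairs testListOffsetsSendsIsolationLevel with the fetcher's position update; the isolation level it asserts on the ListOffset request is the one selected through consumer configuration. A minimal, hypothetical sketch against the modern consumer API (broker address, group id, and topic are placeholders, not values from this dataset):

    import java.time.Duration;
    import java.util.Collections;
    import java.util.Properties;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.serialization.ByteArrayDeserializer;

    public class ReadCommittedConsumerSketch {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "example-group");           // placeholder group id
            props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
            // read_committed is the isolation level the fetcher then attaches to its requests
            props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");

            try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
                consumer.subscribe(Collections.singletonList("example-topic")); // placeholder topic
                ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(500));
                System.out.println("fetched " + records.count() + " record(s) under read_committed");
            }
        }
    }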
@Test public void testUpdateFetchPositionResetToEarliestOffset() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.EARLIEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.EARLIEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
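Note: testUpdateFetchPositionResetToEarliestOffset exercises the EARLIEST reset path inside updateFetchPositions. From application code the equivalent request is usually made with auto.offset.reset=earliest or an explicit seek; a brief sketch of the explicit form, assuming an already-configured consumer and a caller-supplied topic-partition:

    import java.util.Collections;
    import java.util.Set;
    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.common.TopicPartition;

    public class SeekToBeginningSketch {
        // Resets the given partition to its earliest available offset, the same outcome the
        // EARLIEST reset strategy produces in the test above. The consumer is assumed to be
        // configured elsewhere; the topic-partition is a placeholder supplied by the caller.
        static long resetToEarliest(Consumer<byte[], byte[]> consumer, TopicPartition tp) {
            Set<TopicPartition> partitions = Collections.singleton(tp);
            consumer.assign(partitions);
            consumer.seekToBeginning(partitions);
            return consumer.position(tp); // forces the offset lookup and returns the reset position
        }
    }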
@Test public void testUpdateFetchPositionDisconnect() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.LATEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L), true); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 5L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertTrue(subscriptions.isFetchable(tp1)); assertEquals(5, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testUpdateFetchPositionOfPausedPartitionsRequiringOffsetReset() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.committed(tp1, new OffsetAndMetadata(0)); subscriptions.pause(tp1); subscriptions.needOffsetReset(tp1, OffsetResetStrategy.LATEST); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.LATEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 10L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertFalse(subscriptions.isFetchable(tp1)); assertTrue(subscriptions.hasValidPosition(tp1)); assertEquals(10, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
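Note: the paused-partition tests in this part of the dataset check that updateFetchPositions still establishes a position for partitions that are paused and therefore not fetchable. A small illustrative sketch of the corresponding public pause/resume flow, assuming a recent client version and a caller-supplied topic-partition:

    import java.time.Duration;
    import java.util.Collections;
    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.common.TopicPartition;

    public class PausedPartitionSketch {
        // Pausing only stops poll() from returning records for the partition; the consumer
        // still resolves and keeps a valid position, which is what the paused-partition
        // tests assert through hasValidPosition/position.
        static void pauseAndInspect(Consumer<String, String> consumer, TopicPartition tp) {
            consumer.pause(Collections.singleton(tp));
            consumer.poll(Duration.ofMillis(100));               // no records for tp while paused
            System.out.println("paused position: " + consumer.position(tp));
            consumer.resume(Collections.singleton(tp));          // fetching continues from that position
        }
    }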
@Test public void testUpdateFetchPositionOfPausedPartitionsWithoutACommittedOffset() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.pause(tp1); client.prepareResponse(listOffsetRequestMatcher(ListOffsetRequest.EARLIEST_TIMESTAMP), listOffsetResponse(Errors.NONE, 1L, 0L)); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertFalse(subscriptions.isFetchable(tp1)); assertTrue(subscriptions.hasValidPosition(tp1)); assertEquals(0, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testUpdateFetchPositionOfPausedPartitionsWithoutAValidPosition() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.committed(tp1, new OffsetAndMetadata(0)); subscriptions.pause(tp1); subscriptions.seek(tp1, 10); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertFalse(subscriptions.isFetchable(tp1)); assertTrue(subscriptions.hasValidPosition(tp1)); assertEquals(10, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testUpdateFetchPositionOfPausedPartitionsWithAValidPosition() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.committed(tp1, new OffsetAndMetadata(0)); subscriptions.seek(tp1, 10); subscriptions.pause(tp1); fetcher.updateFetchPositions(singleton(tp1)); assertFalse(subscriptions.isOffsetResetNeeded(tp1)); assertFalse(subscriptions.isFetchable(tp1)); assertTrue(subscriptions.hasValidPosition(tp1)); assertEquals(10, subscriptions.position(tp1).longValue()); }
public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public void updateFetchPositions(Set<TopicPartition> partitions) { final Set<TopicPartition> needsOffsetReset = new HashSet<>(); for (TopicPartition tp : partitions) { if (!subscriptions.isAssigned(tp) || subscriptions.hasValidPosition(tp)) continue; if (subscriptions.isOffsetResetNeeded(tp)) { needsOffsetReset.add(tp); } else if (subscriptions.committed(tp) == null) { subscriptions.needOffsetReset(tp); needsOffsetReset.add(tp); } else { long committed = subscriptions.committed(tp).offset(); log.debug("Resetting offset for partition {} to the committed offset {}", tp, committed); subscriptions.seek(tp, committed); } } if (!needsOffsetReset.isEmpty()) { resetOffsets(needsOffsetReset); } } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testGetAllTopics() { client.prepareResponse(newMetadataResponse(topicName, Errors.NONE)); Map<String, List<PartitionInfo>> allTopics = fetcher.getAllTopicMetadata(5000L); assertEquals(cluster.topics().size(), allTopics.size()); }
public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
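Note: testGetAllTopics only asserts that the metadata response is turned into one entry per topic. On the public API this lookup surfaces as the consumer's listTopics call; a minimal usage sketch, assuming an already-configured consumer:

    import java.util.List;
    import java.util.Map;
    import org.apache.kafka.clients.consumer.Consumer;
    import org.apache.kafka.common.PartitionInfo;

    public class ListTopicsSketch {
        // listTopics() is the consumer-facing call that delegates to the fetcher's
        // topic-metadata lookup shown above; each entry maps a topic to its partitions.
        static void printTopics(Consumer<String, String> consumer) {
            Map<String, List<PartitionInfo>> topics = consumer.listTopics();
            for (Map.Entry<String, List<PartitionInfo>> entry : topics.entrySet())
                System.out.println(entry.getKey() + " has " + entry.getValue().size() + " partition(s)");
        }
    }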
@Test public void testGetAllTopicsDisconnect() { client.prepareResponse(null, true); client.prepareResponse(newMetadataResponse(topicName, Errors.NONE)); Map<String, List<PartitionInfo>> allTopics = fetcher.getAllTopicMetadata(5000L); assertEquals(cluster.topics().size(), allTopics.size()); }
public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void timeToConnectOptional() { Schema schema = Time.builder().optional().schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1, \"optional\": true }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertNull(schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
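A minimal standalone sketch of the envelope rule that toConnectData enforces when schemas.enable is on, written against plain Jackson; the class name EnvelopeCheckSketch and the helper isValidEnvelope are illustrative and not part of the Connect API.

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

// Hypothetical helper mirroring the "schema"/"payload" check performed by
// JsonConverter.toConnectData when schemas.enable=true.
public class EnvelopeCheckSketch {
    private static final ObjectMapper MAPPER = new ObjectMapper();

    // Valid only if the JSON is an object with exactly the two expected fields.
    static boolean isValidEnvelope(JsonNode json) {
        return json != null
                && json.isObject()
                && json.size() == 2
                && json.has("schema")
                && json.has("payload");
    }

    public static void main(String[] args) throws Exception {
        JsonNode ok = MAPPER.readTree(
                "{ \"schema\": { \"type\": \"int32\" }, \"payload\": 42 }");
        JsonNode bare = MAPPER.readTree("42");

        System.out.println(isValidEnvelope(ok));   // true
        System.out.println(isValidEnvelope(bare)); // false -> needs schemas.enable=false
    }
}

Plain JSON without the envelope fails this check, which is why the converter's error message points the user at schemas.enable=false.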
@Test(expected = TimeoutException.class) public void testGetAllTopicsTimeout() { fetcher.getAllTopicMetadata(50L); }
public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
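The timeout behaviour exercised by testGetAllTopicsTimeout comes from the bounded retry loop inside getTopicMetadata: elapsed-time accounting plus a backoff capped by the remaining budget. Below is a hedged, self-contained sketch of that loop shape; the Attempt interface and retryUntil method are inventions for illustration, not Kafka classes.

import java.util.Optional;
import java.util.concurrent.TimeoutException;

// Illustrative retry-with-backoff loop, shaped like Fetcher.getTopicMetadata:
// keep trying until a result arrives or the caller's timeout budget is spent.
public class BoundedRetrySketch {

    @FunctionalInterface
    interface Attempt<T> {
        Optional<T> tryOnce();               // empty result means "retry later"
    }

    static <T> T retryUntil(Attempt<T> attempt, long timeoutMs, long retryBackoffMs)
            throws TimeoutException, InterruptedException {
        long start = System.currentTimeMillis();
        long remaining = timeoutMs;
        do {
            Optional<T> result = attempt.tryOnce();
            if (result.isPresent())
                return result.get();

            long elapsed = System.currentTimeMillis() - start;
            remaining = timeoutMs - elapsed;
            if (remaining > 0) {
                long backoff = Math.min(remaining, retryBackoffMs); // never sleep past the deadline
                Thread.sleep(backoff);
                remaining -= backoff;
            }
        } while (remaining > 0);
        throw new TimeoutException("Timeout expired after " + timeoutMs + " ms");
    }

    public static void main(String[] args) throws Exception {
        // An attempt that never succeeds: with a 50 ms budget this throws,
        // matching the expectation in testGetAllTopicsTimeout.
        try {
            retryUntil(Optional::empty, 50L, 100L);
        } catch (TimeoutException e) {
            System.out.println("timed out as expected: " + e.getMessage());
        }
    }
}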
@Test public void testGetAllTopicsUnauthorized() { client.prepareResponse(newMetadataResponse(topicName, Errors.TOPIC_AUTHORIZATION_FAILED)); try { fetcher.getAllTopicMetadata(10L); fail(); } catch (TopicAuthorizationException e) { assertEquals(singleton(topicName), e.unauthorizedTopics()); } }
public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout) { return getTopicMetadata(MetadataRequest.Builder.allTopics(), timeout); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
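testGetAllTopicsUnauthorized relies on the fact that getTopicMetadata fails fast, before any per-topic error handling, when the response reports unauthorized topics. A tiny sketch of that fail-fast shape; UnauthorizedTopicsException and checkAuthorization are made-up names, standing in for TopicAuthorizationException.

import java.util.Set;

// Illustrative only: surface unauthorized topics as soon as they are seen.
public class AuthorizationCheckSketch {

    static class UnauthorizedTopicsException extends RuntimeException {
        private final Set<String> unauthorizedTopics;

        UnauthorizedTopicsException(Set<String> topics) {
            super("Not authorized to access topics: " + topics);
            this.unauthorizedTopics = topics;
        }

        Set<String> unauthorizedTopics() {
            return unauthorizedTopics;
        }
    }

    static void checkAuthorization(Set<String> unauthorizedTopics) {
        if (!unauthorizedTopics.isEmpty())
            throw new UnauthorizedTopicsException(unauthorizedTopics);
    }

    public static void main(String[] args) {
        try {
            checkAuthorization(Set.of("test-topic"));
        } catch (UnauthorizedTopicsException e) {
            // The test asserts exactly this: the exception carries the topic set.
            System.out.println("unauthorized: " + e.unauthorizedTopics());
        }
    }
}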
@Test(expected = InvalidTopicException.class) public void testGetTopicMetadataInvalidTopic() { client.prepareResponse(newMetadataResponse(topicName, Errors.INVALID_TOPIC_EXCEPTION)); fetcher.getTopicMetadata( new MetadataRequest.Builder(Collections.singletonList(topicName), true), 5000L); }
public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
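The invalid-topic, unknown-topic, and leader-not-available tests all exercise the same triage of per-topic errors inside getTopicMetadata: invalid topics fail fast, unknown topics are silently omitted, retriable errors trigger another round. A hedged sketch of that classification; the TopicError enum and shouldRetry are invented for illustration and do not mirror Kafka's Errors class exactly.

import java.util.LinkedHashMap;
import java.util.Map;

// Illustrative triage of per-topic metadata errors, shaped like the loop in
// Fetcher.getTopicMetadata.
public class TopicErrorTriageSketch {

    enum TopicError { NONE, INVALID_TOPIC, UNKNOWN_TOPIC_OR_PARTITION, LEADER_NOT_AVAILABLE }

    // Returns true if the whole metadata request should be retried.
    static boolean shouldRetry(Map<String, TopicError> errors) {
        boolean retry = false;
        for (Map.Entry<String, TopicError> entry : errors.entrySet()) {
            switch (entry.getValue()) {
                case INVALID_TOPIC:
                    throw new IllegalArgumentException("Topic '" + entry.getKey() + "' is invalid");
                case UNKNOWN_TOPIC_OR_PARTITION:
                    break;                       // simply omitted from the result
                case LEADER_NOT_AVAILABLE:
                    retry = true;                // transient: try the request again
                    break;
                default:
                    break;
            }
        }
        return retry;
    }

    public static void main(String[] args) {
        Map<String, TopicError> errors = new LinkedHashMap<>();
        errors.put("test", TopicError.LEADER_NOT_AVAILABLE);
        System.out.println("retry? " + shouldRetry(errors));   // true
    }
}

This is why testGetTopicMetadataLeaderNotAvailable prepares two responses: the first (LEADER_NOT_AVAILABLE) only flags a retry, and the second (NONE) supplies the metadata that the assertion checks.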
@Test public void testGetTopicMetadataUnknownTopic() { client.prepareResponse(newMetadataResponse(topicName, Errors.UNKNOWN_TOPIC_OR_PARTITION)); Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata( new MetadataRequest.Builder(Collections.singletonList(topicName), true), 5000L); assertNull(topicMetadata.get(topicName)); }
public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testGetTopicMetadataLeaderNotAvailable() { client.prepareResponse(newMetadataResponse(topicName, Errors.LEADER_NOT_AVAILABLE)); client.prepareResponse(newMetadataResponse(topicName, Errors.NONE)); Map<String, List<PartitionInfo>> topicMetadata = fetcher.getTopicMetadata( new MetadataRequest.Builder(Collections.singletonList(topicName), true), 5000L); assertTrue(topicMetadata.containsKey(topicName)); }
public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout) { if (!request.isAllTopics() && request.topics().isEmpty()) return Collections.emptyMap(); long start = time.milliseconds(); long remaining = timeout; do { RequestFuture<ClientResponse> future = sendMetadataRequest(request); client.poll(future, remaining); if (future.failed() && !future.isRetriable()) throw future.exception(); if (future.succeeded()) { MetadataResponse response = (MetadataResponse) future.value().responseBody(); Cluster cluster = response.cluster(); Set<String> unauthorizedTopics = cluster.unauthorizedTopics(); if (!unauthorizedTopics.isEmpty()) throw new TopicAuthorizationException(unauthorizedTopics); boolean shouldRetry = false; Map<String, Errors> errors = response.errors(); if (!errors.isEmpty()) { log.debug("Topic metadata fetch included errors: {}", errors); for (Map.Entry<String, Errors> errorEntry : errors.entrySet()) { String topic = errorEntry.getKey(); Errors error = errorEntry.getValue(); if (error == Errors.INVALID_TOPIC_EXCEPTION) throw new InvalidTopicException("Topic '" + topic + "' is invalid"); else if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) continue; else if (error.exception() instanceof RetriableException) shouldRetry = true; else throw new KafkaException("Unexpected error fetching metadata for topic " + topic, error.exception()); } } if (!shouldRetry) { HashMap<String, List<PartitionInfo>> topicsPartitionInfos = new HashMap<>(); for (String topic : cluster.topics()) topicsPartitionInfos.put(topic, cluster.availablePartitionsForTopic(topic)); return topicsPartitionInfos; } } long elapsed = time.milliseconds() - start; remaining = timeout - elapsed; if (remaining > 0) { long backoff = Math.min(remaining, retryBackoffMs); time.sleep(backoff); remaining -= backoff; } } while (remaining > 0); throw new TimeoutException("Timeout expired while fetching topic metadata"); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testFetcherMetrics() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax); MetricName partitionLagMetric = metrics.metricName(tp1 + ".records-lag", metricGroup); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric); assertEquals(Double.NEGATIVE_INFINITY, recordsFetchLagMax.value(), EPSILON); fetchRecords(tp1, MemoryRecords.EMPTY, Errors.NONE, 100L, 0); assertEquals(100, recordsFetchLagMax.value(), EPSILON); KafkaMetric partitionLag = allMetrics.get(partitionLagMetric); assertEquals(100, partitionLag.value(), EPSILON); MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); fetchRecords(tp1, builder.build(), Errors.NONE, 200L, 0); assertEquals(197, recordsFetchLagMax.value(), EPSILON); assertEquals(197, partitionLag.value(), EPSILON); subscriptions.unsubscribe(); assertFalse(allMetrics.containsKey(partitionLagMetric)); }
private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
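The fetchRecords variants above only hand back buffered records while the partition is still assigned and fetchable and the buffered nextFetchOffset matches the consumer's current position; they then advance the position and record the partition lag. As a minimal, hedged sketch (not taken from the source above), the same position bookkeeping can be observed through the public KafkaConsumer API; the broker address, topic name, and group id below are placeholder assumptions.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

public class PositionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");   // placeholder broker
        props.put("group.id", "demo-group");                 // placeholder group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer");
        props.put("max.poll.records", "100");                // caps the maxRecords handed to fetchRecords per poll
        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("demo-topic", 0);  // placeholder topic
            consumer.assign(Collections.singletonList(tp));
            consumer.seekToBeginning(Collections.singletonList(tp));
            ConsumerRecords<byte[], byte[]> records = consumer.poll(1000);
            // After records are returned, the position has moved past the last returned offset,
            // mirroring subscriptions.position(partition, nextOffset) in the focal method above.
            System.out.println("returned=" + records.count() + ", position=" + consumer.position(tp));
        }
    }
}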
@Test public void testReadCommittedLagMetric() { Metrics metrics = new Metrics(); fetcher = createFetcher(subscriptions, metrics, new ByteArrayDeserializer(), new ByteArrayDeserializer(), Integer.MAX_VALUE, IsolationLevel.READ_COMMITTED); subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); MetricName maxLagMetric = metrics.metricInstance(metricsRegistry.recordsLagMax); MetricName partitionLagMetric = metrics.metricName(tp1 + ".records-lag", metricGroup); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric recordsFetchLagMax = allMetrics.get(maxLagMetric); assertEquals(Double.NEGATIVE_INFINITY, recordsFetchLagMax.value(), EPSILON); fetchRecords(tp1, MemoryRecords.EMPTY, Errors.NONE, 100L, 50L, 0); assertEquals(50, recordsFetchLagMax.value(), EPSILON); KafkaMetric partitionLag = allMetrics.get(partitionLagMetric); assertEquals(50, partitionLag.value(), EPSILON); MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); fetchRecords(tp1, builder.build(), Errors.NONE, 200L, 150L, 0); assertEquals(147, recordsFetchLagMax.value(), EPSILON); assertEquals(147, partitionLag.value(), EPSILON); subscriptions.unsubscribe(); assertFalse(allMetrics.containsKey(partitionLagMetric)); }
private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
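testReadCommittedLagMetric drives the per-partition records-lag sensor through fetchRecords with IsolationLevel.READ_COMMITTED, where the lag is taken from SubscriptionState.partitionLag against the last stable offset rather than the high watermark. As a rough sketch only, an application could observe the same metric family on a live consumer as below; the "records-lag" naming follows the registry used in the test, and Metric.value() is the accessor available in this Kafka version.

import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.Metric;
import org.apache.kafka.common.MetricName;

public final class LagMetricsSketch {
    // 'consumer' is an already-configured KafkaConsumer, e.g. from the earlier sketch.
    public static void printLagMetrics(KafkaConsumer<?, ?> consumer) {
        Map<MetricName, ? extends Metric> metrics = consumer.metrics();
        for (MetricName name : metrics.keySet()) {
            // e.g. records-lag-max for the whole consumer, or "<topic-partition>.records-lag" per partition
            if (name.name().contains("records-lag"))
                System.out.println(name.group() + "/" + name.name() + " = " + metrics.get(name).value());
        }
    }
}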
@Test public void testFetchResponseMetrics() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 0); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg)); KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg)); MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); MemoryRecords records = builder.build(); int expectedBytes = 0; for (Record record : records.records()) expectedBytes += record.sizeInBytes(); fetchRecords(tp1, records, Errors.NONE, 100L, 0); assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON); assertEquals(3, recordsCountAverage.value(), EPSILON); }
private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test public void testFetchResponseMetricsPartialResponse() { subscriptions.assignFromUser(singleton(tp1)); subscriptions.seek(tp1, 1); Map<MetricName, KafkaMetric> allMetrics = metrics.metrics(); KafkaMetric fetchSizeAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.fetchSizeAvg)); KafkaMetric recordsCountAverage = allMetrics.get(metrics.metricInstance(metricsRegistry.recordsPerRequestAvg)); MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), CompressionType.NONE, TimestampType.CREATE_TIME, 0L); for (int v = 0; v < 3; v++) builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes()); MemoryRecords records = builder.build(); int expectedBytes = 0; for (Record record : records.records()) { if (record.offset() >= 1) expectedBytes += record.sizeInBytes(); } fetchRecords(tp1, records, Errors.NONE, 100L, 0); assertEquals(expectedBytes, fetchSizeAverage.value(), EPSILON); assertEquals(2, recordsCountAverage.value(), EPSILON); }
private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { private List<ConsumerRecord<K, V>> fetchRecords(PartitionRecords partitionRecords, int maxRecords) { if (!subscriptions.isAssigned(partitionRecords.partition)) { log.debug("Not returning fetched records for partition {} since it is no longer assigned", partitionRecords.partition); } else { long position = subscriptions.position(partitionRecords.partition); if (!subscriptions.isFetchable(partitionRecords.partition)) { log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", partitionRecords.partition); } else if (partitionRecords.nextFetchOffset == position) { List<ConsumerRecord<K, V>> partRecords = partitionRecords.fetchRecords(maxRecords); long nextOffset = partitionRecords.nextFetchOffset; log.trace("Returning fetched records at offset {} for assigned partition {} and update " + "position to {}", position, partitionRecords.partition, nextOffset); subscriptions.position(partitionRecords.partition, nextOffset); Long partitionLag = subscriptions.partitionLag(partitionRecords.partition, isolationLevel); if (partitionLag != null) this.sensors.recordPartitionLag(partitionRecords.partition, partitionLag); return partRecords; } else { log.debug("Ignoring fetched records for {} at offset {} since the current position is {}", partitionRecords.partition, partitionRecords.nextFetchOffset, position); } } partitionRecords.drain(); return emptyList(); } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
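The two fetch-response tests above (testFetchResponseMetrics and testFetchResponseMetricsPartialResponse) assert that fetch-size-avg and records-per-request-avg count only the bytes and records at or after the current position. The following self-contained sketch reproduces the byte counting the partial-response test performs, using the same MemoryRecordsBuilder API; the position value of 1 is just the test's scenario.

import java.nio.ByteBuffer;
import org.apache.kafka.common.record.CompressionType;
import org.apache.kafka.common.record.MemoryRecords;
import org.apache.kafka.common.record.MemoryRecordsBuilder;
import org.apache.kafka.common.record.Record;
import org.apache.kafka.common.record.RecordBatch;
import org.apache.kafka.common.record.TimestampType;

public class FetchSizeSketch {
    public static void main(String[] args) {
        MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024),
                CompressionType.NONE, TimestampType.CREATE_TIME, 0L);
        for (int v = 0; v < 3; v++)
            builder.appendWithOffset(v, RecordBatch.NO_TIMESTAMP, "key".getBytes(), ("value-" + v).getBytes());
        MemoryRecords records = builder.build();
        long position = 1L;                       // offset 0 has already been consumed
        int countedBytes = 0, countedRecords = 0;
        for (Record record : records.records()) {
            if (record.offset() >= position) {    // records below the position are ignored by the sensor
                countedBytes += record.sizeInBytes();
                countedRecords++;
            }
        }
        // With three records and position 1, only offsets 1 and 2 contribute, matching the test above.
        System.out.println("bytes=" + countedBytes + ", records=" + countedRecords);
    }
}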
@Test public void testGetOffsetsForTimesTimeout() { try { fetcher.getOffsetsByTimes(Collections.singletonMap(new TopicPartition(topicName, 2), 1000L), 100L); fail("Should throw timeout exception."); } catch (TimeoutException e) { } }
public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
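testGetOffsetsForTimesTimeout expects getOffsetsByTimes to throw TimeoutException when the offset lookup cannot complete within the given timeout. The public counterpart of this focal method is KafkaConsumer.offsetsForTimes; the usage sketch below is illustrative, with 'consumer' and 'tp' assumed to come from an earlier setup and the overall timeout governed by the consumer's request timeout in this version.

import java.util.Collections;
import java.util.Map;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.errors.TimeoutException;

public final class OffsetsForTimesSketch {
    // 'consumer' is a configured KafkaConsumer and 'tp' an assigned partition, as in the earlier sketch.
    public static void seekToOneHourAgo(KafkaConsumer<?, ?> consumer, TopicPartition tp) {
        long oneHourAgo = System.currentTimeMillis() - 60 * 60 * 1000L;
        try {
            Map<TopicPartition, OffsetAndTimestamp> result =
                    consumer.offsetsForTimes(Collections.singletonMap(tp, oneHourAgo));
            OffsetAndTimestamp found = result.get(tp);
            if (found != null)
                consumer.seek(tp, found.offset());   // first offset with timestamp >= oneHourAgo
            else
                System.out.println("no offset for the requested timestamp (e.g. old message format)");
        } catch (TimeoutException e) {
            // Same condition the test asserts: the lookup could not complete within the timeout.
        }
    }
}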
@Test public void timeToConnectWithDefaultValue() { java.util.Date reference = new java.util.Date(0); Schema schema = Time.builder().defaultValue(reference).schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1, \"default\": 0 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
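timeToConnectWithDefaultValue exercises the schema/payload envelope that toConnectData requires when schemas are enabled, with a null payload resolving to the schema's default (epoch for the Time logical type). The sketch below drives JsonConverter directly in the same way; the topic name is a placeholder, and "schemas.enable" is the standard converter setting read by configure().

import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

public class JsonConverterSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.singletonMap("schemas.enable", "true"), false); // value converter
        String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1, \"default\": 0 }, \"payload\": null }";
        SchemaAndValue schemaAndValue = converter.toConnectData("demo-topic", msg.getBytes(StandardCharsets.UTF_8));
        // The Time logical type deserializes to java.util.Date; the null payload falls back to the schema default (epoch).
        System.out.println(schemaAndValue.schema() + " -> " + schemaAndValue.value());
    }
}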
@Test public void testGetOffsetsForTimes() { assertTrue(fetcher.getOffsetsByTimes(new HashMap<TopicPartition, Long>(), 100L).isEmpty()); testGetOffsetsForTimesWithError(Errors.NONE, Errors.NONE, -1L, 100L, null, 100L); testGetOffsetsForTimesWithError(Errors.NONE, Errors.NONE, 10L, 100L, 10L, 100L); testGetOffsetsForTimesWithError(Errors.NOT_LEADER_FOR_PARTITION, Errors.INVALID_REQUEST, 10L, 100L, 10L, 100L); testGetOffsetsForTimesWithError(Errors.NONE, Errors.NOT_LEADER_FOR_PARTITION, 10L, 100L, 10L, 100L); testGetOffsetsForTimesWithError(Errors.NOT_LEADER_FOR_PARTITION, Errors.NONE, 10L, 100L, 10L, 100L); testGetOffsetsForTimesWithError(Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.NONE, 10L, 100L, 10L, 100L); testGetOffsetsForTimesWithError(Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT, Errors.NONE, 10L, 100L, null, 100L); testGetOffsetsForTimesWithError(Errors.BROKER_NOT_AVAILABLE, Errors.NONE, 10L, 100L, 10L, 100L); }
public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
@Test(expected = TimeoutException.class) public void testBatchedListOffsetsMetadataErrors() { Map<TopicPartition, ListOffsetResponse.PartitionData> partitionData = new HashMap<>(); partitionData.put(tp1, new ListOffsetResponse.PartitionData(Errors.NOT_LEADER_FOR_PARTITION, ListOffsetResponse.UNKNOWN_TIMESTAMP, ListOffsetResponse.UNKNOWN_OFFSET)); partitionData.put(tp2, new ListOffsetResponse.PartitionData(Errors.UNKNOWN_TOPIC_OR_PARTITION, ListOffsetResponse.UNKNOWN_TIMESTAMP, ListOffsetResponse.UNKNOWN_OFFSET)); client.prepareResponse(new ListOffsetResponse(0, partitionData)); Map<TopicPartition, Long> offsetsToSearch = new HashMap<>(); offsetsToSearch.put(tp1, ListOffsetRequest.EARLIEST_TIMESTAMP); offsetsToSearch.put(tp2, ListOffsetRequest.EARLIEST_TIMESTAMP); fetcher.getOffsetsByTimes(offsetsToSearch, 0); }
public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
Fetcher implements SubscriptionState.Listener, Closeable { public Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout) { Map<TopicPartition, OffsetData> offsetData = retrieveOffsetsByTimes(timestampsToSearch, timeout, true); HashMap<TopicPartition, OffsetAndTimestamp> offsetsByTimes = new HashMap<>(offsetData.size()); for (Map.Entry<TopicPartition, OffsetData> entry : offsetData.entrySet()) { OffsetData data = entry.getValue(); if (data == null) offsetsByTimes.put(entry.getKey(), null); else offsetsByTimes.put(entry.getKey(), new OffsetAndTimestamp(data.offset, data.timestamp)); } return offsetsByTimes; } Fetcher(ConsumerNetworkClient client, int minBytes, int maxBytes, int maxWaitMs, int fetchSize, int maxPollRecords, boolean checkCrcs, Deserializer<K> keyDeserializer, Deserializer<V> valueDeserializer, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, FetcherMetricsRegistry metricsRegistry, Time time, long retryBackoffMs, IsolationLevel isolationLevel); boolean hasCompletedFetches(); int sendFetches(); void resetOffsetsIfNeeded(Set<TopicPartition> partitions); void updateFetchPositions(Set<TopicPartition> partitions); Map<String, List<PartitionInfo>> getAllTopicMetadata(long timeout); Map<String, List<PartitionInfo>> getTopicMetadata(MetadataRequest.Builder request, long timeout); Map<TopicPartition, OffsetAndTimestamp> getOffsetsByTimes(Map<TopicPartition, Long> timestampsToSearch, long timeout); Map<TopicPartition, Long> beginningOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, Long> endOffsets(Collection<TopicPartition> partitions, long timeout); Map<TopicPartition, List<ConsumerRecord<K, V>>> fetchedRecords(); @Override void onAssignment(Set<TopicPartition> assignment); static Sensor throttleTimeSensor(Metrics metrics, FetcherMetricsRegistry metricsRegistry); @Override void close(); }
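Fetcher is an internal client class, so applications do not call getOffsetsByTimes directly; the lookup shown above is normally reached through the public consumer API. The sketch below is an illustration only, assuming KafkaConsumer#offsetsForTimes as the public entry point; the broker address, group id, and topic name are placeholders and a running broker is required.

// Sketch: timestamp-to-offset lookup via the public consumer API (placeholders marked).
import java.util.Collections;
import java.util.Map;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndTimestamp;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

public class OffsetsForTimesSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // placeholder broker address
        props.put("group.id", "offsets-for-times-demo");   // placeholder group id
        props.put("key.deserializer", ByteArrayDeserializer.class.getName());
        props.put("value.deserializer", ByteArrayDeserializer.class.getName());

        try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("my-topic", 0); // placeholder topic
            // Find the earliest offset whose timestamp is >= the given time;
            // a null entry means no such offset exists for that partition.
            Map<TopicPartition, OffsetAndTimestamp> result =
                    consumer.offsetsForTimes(Collections.singletonMap(tp, 0L));
            System.out.println(result.get(tp));
        }
    }
}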
@Test(expected = IllegalArgumentException.class) public void testRuntimeExceptionInComplete() { RequestFuture<Exception> future = new RequestFuture<>(); future.complete(new RuntimeException()); }
public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } } boolean isDone(); boolean awaitDone(long timeout, TimeUnit unit); @SuppressWarnings("unchecked") T value(); boolean succeeded(); boolean failed(); boolean isRetriable(); RuntimeException exception(); void complete(T value); void raise(RuntimeException e); void raise(Errors error); void addListener(RequestFutureListener<T> listener); RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter); void chain(final RequestFuture<T> future); static RequestFuture<T> failure(RuntimeException e); static RequestFuture<Void> voidSuccess(); static RequestFuture<T> coordinatorNotAvailable(); static RequestFuture<T> leaderNotAvailable(); static RequestFuture<T> noBrokersAvailable(); static RequestFuture<T> staleMetadata(); @Override boolean shouldBlock(); }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } } boolean isDone(); boolean awaitDone(long timeout, TimeUnit unit); @SuppressWarnings("unchecked") T value(); boolean succeeded(); boolean failed(); boolean isRetriable(); RuntimeException exception(); void complete(T value); void raise(RuntimeException e); void raise(Errors error); void addListener(RequestFutureListener<T> listener); RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter); void chain(final RequestFuture<T> future); static RequestFuture<T> failure(RuntimeException e); static RequestFuture<Void> voidSuccess(); static RequestFuture<T> coordinatorNotAvailable(); static RequestFuture<T> leaderNotAvailable(); static RequestFuture<T> noBrokersAvailable(); static RequestFuture<T> staleMetadata(); @Override boolean shouldBlock(); }
@Test(expected = IllegalStateException.class) public void invokeCompleteAfterAlreadyComplete() { RequestFuture<Void> future = new RequestFuture<>(); future.complete(null); future.complete(null); }
public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } } boolean isDone(); boolean awaitDone(long timeout, TimeUnit unit); @SuppressWarnings("unchecked") T value(); boolean succeeded(); boolean failed(); boolean isRetriable(); RuntimeException exception(); void complete(T value); void raise(RuntimeException e); void raise(Errors error); void addListener(RequestFutureListener<T> listener); RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter); void chain(final RequestFuture<T> future); static RequestFuture<T> failure(RuntimeException e); static RequestFuture<Void> voidSuccess(); static RequestFuture<T> coordinatorNotAvailable(); static RequestFuture<T> leaderNotAvailable(); static RequestFuture<T> noBrokersAvailable(); static RequestFuture<T> staleMetadata(); @Override boolean shouldBlock(); }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void complete(T value) { try { if (value instanceof RuntimeException) throw new IllegalArgumentException("The argument to complete can not be an instance of RuntimeException"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, value)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireSuccess(); } finally { completedLatch.countDown(); } } boolean isDone(); boolean awaitDone(long timeout, TimeUnit unit); @SuppressWarnings("unchecked") T value(); boolean succeeded(); boolean failed(); boolean isRetriable(); RuntimeException exception(); void complete(T value); void raise(RuntimeException e); void raise(Errors error); void addListener(RequestFutureListener<T> listener); RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter); void chain(final RequestFuture<T> future); static RequestFuture<T> failure(RuntimeException e); static RequestFuture<Void> voidSuccess(); static RequestFuture<T> coordinatorNotAvailable(); static RequestFuture<T> leaderNotAvailable(); static RequestFuture<T> noBrokersAvailable(); static RequestFuture<T> staleMetadata(); @Override boolean shouldBlock(); }
@Test(expected = IllegalStateException.class) public void invokeRaiseAfterAlreadyFailed() { RequestFuture<Void> future = new RequestFuture<>(); future.raise(new RuntimeException()); future.raise(new RuntimeException()); }
public void raise(RuntimeException e) { try { if (e == null) throw new IllegalArgumentException("The exception passed to raise must not be null"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, e)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireFailure(); } finally { completedLatch.countDown(); } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void raise(RuntimeException e) { try { if (e == null) throw new IllegalArgumentException("The exception passed to raise must not be null"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, e)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireFailure(); } finally { completedLatch.countDown(); } } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void raise(RuntimeException e) { try { if (e == null) throw new IllegalArgumentException("The exception passed to raise must not be null"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, e)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireFailure(); } finally { completedLatch.countDown(); } } }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void raise(RuntimeException e) { try { if (e == null) throw new IllegalArgumentException("The exception passed to raise must not be null"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, e)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireFailure(); } finally { completedLatch.countDown(); } } boolean isDone(); boolean awaitDone(long timeout, TimeUnit unit); @SuppressWarnings("unchecked") T value(); boolean succeeded(); boolean failed(); boolean isRetriable(); RuntimeException exception(); void complete(T value); void raise(RuntimeException e); void raise(Errors error); void addListener(RequestFutureListener<T> listener); RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter); void chain(final RequestFuture<T> future); static RequestFuture<T> failure(RuntimeException e); static RequestFuture<Void> voidSuccess(); static RequestFuture<T> coordinatorNotAvailable(); static RequestFuture<T> leaderNotAvailable(); static RequestFuture<T> noBrokersAvailable(); static RequestFuture<T> staleMetadata(); @Override boolean shouldBlock(); }
RequestFuture implements ConsumerNetworkClient.PollCondition { public void raise(RuntimeException e) { try { if (e == null) throw new IllegalArgumentException("The exception passed to raise must not be null"); if (!result.compareAndSet(INCOMPLETE_SENTINEL, e)) throw new IllegalStateException("Invalid attempt to complete a request future which is already complete"); fireFailure(); } finally { completedLatch.countDown(); } } boolean isDone(); boolean awaitDone(long timeout, TimeUnit unit); @SuppressWarnings("unchecked") T value(); boolean succeeded(); boolean failed(); boolean isRetriable(); RuntimeException exception(); void complete(T value); void raise(RuntimeException e); void raise(Errors error); void addListener(RequestFutureListener<T> listener); RequestFuture<S> compose(final RequestFutureAdapter<T, S> adapter); void chain(final RequestFuture<T> future); static RequestFuture<T> failure(RuntimeException e); static RequestFuture<Void> voidSuccess(); static RequestFuture<T> coordinatorNotAvailable(); static RequestFuture<T> leaderNotAvailable(); static RequestFuture<T> noBrokersAvailable(); static RequestFuture<T> staleMetadata(); @Override boolean shouldBlock(); }
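A minimal sketch of the completion contract the two tests above exercise: a RequestFuture may be completed or raised exactly once, and RuntimeException values must go through raise() rather than complete(). RequestFuture is an internal client class, so the import path below is an assumption.

// Sketch of single-shot completion semantics using only methods shown in the class above.
import org.apache.kafka.clients.consumer.internals.RequestFuture;

public class RequestFutureSketch {
    public static void main(String[] args) {
        RequestFuture<String> success = new RequestFuture<>();
        success.complete("done");                 // first (and only) completion
        System.out.println(success.isDone());     // true
        System.out.println(success.succeeded());  // true
        System.out.println(success.value());      // done

        RequestFuture<String> failure = new RequestFuture<>();
        failure.raise(new RuntimeException("boom"));           // failures use raise(), never complete()
        System.out.println(failure.failed());                  // true
        System.out.println(failure.exception().getMessage());  // boom
    }
}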
@Test public void deserializeNewSubscriptionVersion() { short version = 100; Schema subscriptionSchemaV100 = new Schema( new Field(ConsumerProtocol.TOPICS_KEY_NAME, new ArrayOf(Type.STRING)), new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES), new Field("foo", Type.STRING)); Struct subscriptionV100 = new Struct(subscriptionSchemaV100); subscriptionV100.set(ConsumerProtocol.TOPICS_KEY_NAME, new Object[]{"topic"}); subscriptionV100.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0])); subscriptionV100.set("foo", "bar"); Struct headerV100 = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA); headerV100.set(ConsumerProtocol.VERSION_KEY_NAME, version); ByteBuffer buffer = ByteBuffer.allocate(subscriptionV100.sizeOf() + headerV100.sizeOf()); headerV100.writeTo(buffer); subscriptionV100.writeTo(buffer); buffer.flip(); Subscription subscription = ConsumerProtocol.deserializeSubscription(buffer); assertEquals(Arrays.asList("topic"), subscription.topics()); }
public static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = SUBSCRIPTION_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<String> topics = new ArrayList<>(); for (Object topicObj : struct.getArray(TOPICS_KEY_NAME)) topics.add((String) topicObj); return new PartitionAssignor.Subscription(topics, userData); }
ConsumerProtocol { public static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = SUBSCRIPTION_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<String> topics = new ArrayList<>(); for (Object topicObj : struct.getArray(TOPICS_KEY_NAME)) topics.add((String) topicObj); return new PartitionAssignor.Subscription(topics, userData); } }
ConsumerProtocol { public static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = SUBSCRIPTION_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<String> topics = new ArrayList<>(); for (Object topicObj : struct.getArray(TOPICS_KEY_NAME)) topics.add((String) topicObj); return new PartitionAssignor.Subscription(topics, userData); } }
ConsumerProtocol { public static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = SUBSCRIPTION_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<String> topics = new ArrayList<>(); for (Object topicObj : struct.getArray(TOPICS_KEY_NAME)) topics.add((String) topicObj); return new PartitionAssignor.Subscription(topics, userData); } static ByteBuffer serializeSubscription(PartitionAssignor.Subscription subscription); static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer); static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer); static ByteBuffer serializeAssignment(PartitionAssignor.Assignment assignment); }
ConsumerProtocol { public static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = SUBSCRIPTION_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<String> topics = new ArrayList<>(); for (Object topicObj : struct.getArray(TOPICS_KEY_NAME)) topics.add((String) topicObj); return new PartitionAssignor.Subscription(topics, userData); } static ByteBuffer serializeSubscription(PartitionAssignor.Subscription subscription); static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer); static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer); static ByteBuffer serializeAssignment(PartitionAssignor.Assignment assignment); static final String PROTOCOL_TYPE; static final String VERSION_KEY_NAME; static final String TOPICS_KEY_NAME; static final String TOPIC_KEY_NAME; static final String PARTITIONS_KEY_NAME; static final String TOPIC_PARTITIONS_KEY_NAME; static final String USER_DATA_KEY_NAME; static final short CONSUMER_PROTOCOL_V0; static final Schema CONSUMER_PROTOCOL_HEADER_SCHEMA; static final Schema SUBSCRIPTION_V0; static final Schema TOPIC_ASSIGNMENT_V0; static final Schema ASSIGNMENT_V0; }
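A small round-trip sketch for the subscription schema the test above probes: serialize a Subscription with serializeSubscription and read it back with deserializeSubscription. ConsumerProtocol and PartitionAssignor are internal classes, so the import paths are assumptions.

// Sketch: subscription serialize/deserialize round trip.
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor;

public class SubscriptionRoundTrip {
    public static void main(String[] args) {
        PartitionAssignor.Subscription subscription = new PartitionAssignor.Subscription(
                Arrays.asList("topic"), ByteBuffer.wrap(new byte[0]));
        ByteBuffer buffer = ConsumerProtocol.serializeSubscription(subscription);
        PartitionAssignor.Subscription decoded = ConsumerProtocol.deserializeSubscription(buffer);
        System.out.println(decoded.topics()); // [topic]
    }
}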
@Test public void deserializeNewAssignmentVersion() { short version = 100; Schema assignmentSchemaV100 = new Schema( new Field(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new ArrayOf(ConsumerProtocol.TOPIC_ASSIGNMENT_V0)), new Field(ConsumerProtocol.USER_DATA_KEY_NAME, Type.BYTES), new Field("foo", Type.STRING)); Struct assignmentV100 = new Struct(assignmentSchemaV100); assignmentV100.set(ConsumerProtocol.TOPIC_PARTITIONS_KEY_NAME, new Object[]{new Struct(ConsumerProtocol.TOPIC_ASSIGNMENT_V0) .set(ConsumerProtocol.TOPIC_KEY_NAME, "foo") .set(ConsumerProtocol.PARTITIONS_KEY_NAME, new Object[]{1})}); assignmentV100.set(ConsumerProtocol.USER_DATA_KEY_NAME, ByteBuffer.wrap(new byte[0])); assignmentV100.set("foo", "bar"); Struct headerV100 = new Struct(ConsumerProtocol.CONSUMER_PROTOCOL_HEADER_SCHEMA); headerV100.set(ConsumerProtocol.VERSION_KEY_NAME, version); ByteBuffer buffer = ByteBuffer.allocate(assignmentV100.sizeOf() + headerV100.sizeOf()); headerV100.writeTo(buffer); assignmentV100.writeTo(buffer); buffer.flip(); PartitionAssignor.Assignment assignment = ConsumerProtocol.deserializeAssignment(buffer); assertEquals(toSet(Arrays.asList(new TopicPartition("foo", 1))), toSet(assignment.partitions())); }
public static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = ASSIGNMENT_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<TopicPartition> partitions = new ArrayList<>(); for (Object structObj : struct.getArray(TOPIC_PARTITIONS_KEY_NAME)) { Struct assignment = (Struct) structObj; String topic = assignment.getString(TOPIC_KEY_NAME); for (Object partitionObj : assignment.getArray(PARTITIONS_KEY_NAME)) { Integer partition = (Integer) partitionObj; partitions.add(new TopicPartition(topic, partition)); } } return new PartitionAssignor.Assignment(partitions, userData); }
ConsumerProtocol { public static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = ASSIGNMENT_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<TopicPartition> partitions = new ArrayList<>(); for (Object structObj : struct.getArray(TOPIC_PARTITIONS_KEY_NAME)) { Struct assignment = (Struct) structObj; String topic = assignment.getString(TOPIC_KEY_NAME); for (Object partitionObj : assignment.getArray(PARTITIONS_KEY_NAME)) { Integer partition = (Integer) partitionObj; partitions.add(new TopicPartition(topic, partition)); } } return new PartitionAssignor.Assignment(partitions, userData); } }
ConsumerProtocol { public static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = ASSIGNMENT_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<TopicPartition> partitions = new ArrayList<>(); for (Object structObj : struct.getArray(TOPIC_PARTITIONS_KEY_NAME)) { Struct assignment = (Struct) structObj; String topic = assignment.getString(TOPIC_KEY_NAME); for (Object partitionObj : assignment.getArray(PARTITIONS_KEY_NAME)) { Integer partition = (Integer) partitionObj; partitions.add(new TopicPartition(topic, partition)); } } return new PartitionAssignor.Assignment(partitions, userData); } }
ConsumerProtocol { public static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = ASSIGNMENT_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<TopicPartition> partitions = new ArrayList<>(); for (Object structObj : struct.getArray(TOPIC_PARTITIONS_KEY_NAME)) { Struct assignment = (Struct) structObj; String topic = assignment.getString(TOPIC_KEY_NAME); for (Object partitionObj : assignment.getArray(PARTITIONS_KEY_NAME)) { Integer partition = (Integer) partitionObj; partitions.add(new TopicPartition(topic, partition)); } } return new PartitionAssignor.Assignment(partitions, userData); } static ByteBuffer serializeSubscription(PartitionAssignor.Subscription subscription); static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer); static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer); static ByteBuffer serializeAssignment(PartitionAssignor.Assignment assignment); }
ConsumerProtocol { public static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer) { Struct header = CONSUMER_PROTOCOL_HEADER_SCHEMA.read(buffer); Short version = header.getShort(VERSION_KEY_NAME); checkVersionCompatibility(version); Struct struct = ASSIGNMENT_V0.read(buffer); ByteBuffer userData = struct.getBytes(USER_DATA_KEY_NAME); List<TopicPartition> partitions = new ArrayList<>(); for (Object structObj : struct.getArray(TOPIC_PARTITIONS_KEY_NAME)) { Struct assignment = (Struct) structObj; String topic = assignment.getString(TOPIC_KEY_NAME); for (Object partitionObj : assignment.getArray(PARTITIONS_KEY_NAME)) { Integer partition = (Integer) partitionObj; partitions.add(new TopicPartition(topic, partition)); } } return new PartitionAssignor.Assignment(partitions, userData); } static ByteBuffer serializeSubscription(PartitionAssignor.Subscription subscription); static PartitionAssignor.Subscription deserializeSubscription(ByteBuffer buffer); static PartitionAssignor.Assignment deserializeAssignment(ByteBuffer buffer); static ByteBuffer serializeAssignment(PartitionAssignor.Assignment assignment); static final String PROTOCOL_TYPE; static final String VERSION_KEY_NAME; static final String TOPICS_KEY_NAME; static final String TOPIC_KEY_NAME; static final String PARTITIONS_KEY_NAME; static final String TOPIC_PARTITIONS_KEY_NAME; static final String USER_DATA_KEY_NAME; static final short CONSUMER_PROTOCOL_V0; static final Schema CONSUMER_PROTOCOL_HEADER_SCHEMA; static final Schema SUBSCRIPTION_V0; static final Schema TOPIC_ASSIGNMENT_V0; static final Schema ASSIGNMENT_V0; }
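The same round trip applies to the assignment schema; the sketch below mirrors the test above with assumed import paths and a placeholder topic.

// Sketch: assignment serialize/deserialize round trip.
import java.nio.ByteBuffer;
import java.util.Arrays;
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol;
import org.apache.kafka.clients.consumer.internals.PartitionAssignor;
import org.apache.kafka.common.TopicPartition;

public class AssignmentRoundTrip {
    public static void main(String[] args) {
        PartitionAssignor.Assignment assignment = new PartitionAssignor.Assignment(
                Arrays.asList(new TopicPartition("foo", 1)), ByteBuffer.wrap(new byte[0]));
        ByteBuffer buffer = ConsumerProtocol.serializeAssignment(assignment);
        PartitionAssignor.Assignment decoded = ConsumerProtocol.deserializeAssignment(buffer);
        System.out.println(decoded.partitions()); // [foo-1]
    }
}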
@Test public void testNormalHeartbeat() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); time.sleep(sessionTimeoutMs); RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); assertEquals(1, consumerClient.pendingRequestCount()); assertFalse(future.isDone()); client.prepareResponse(heartbeatResponse(Errors.NONE)); consumerClient.poll(0); assertTrue(future.isDone()); assertTrue(future.succeeded()); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test(expected = GroupAuthorizationException.class) public void testGroupReadUnauthorized() { subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupLeaderResponse(0, "memberId", Collections.<String, List<String>>emptyMap(), Errors.GROUP_AUTHORIZATION_FAILED)); coordinator.poll(time.milliseconds(), Long.MAX_VALUE); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testCoordinatorNotAvailable() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); time.sleep(sessionTimeoutMs); RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); assertEquals(1, consumerClient.pendingRequestCount()); assertFalse(future.isDone()); client.prepareResponse(heartbeatResponse(Errors.COORDINATOR_NOT_AVAILABLE)); time.sleep(sessionTimeoutMs); consumerClient.poll(0); assertTrue(future.isDone()); assertTrue(future.failed()); assertEquals(Errors.COORDINATOR_NOT_AVAILABLE.exception(), future.exception()); assertTrue(coordinator.coordinatorUnknown()); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void timeToConnectOptionalWithDefaultValue() { java.util.Date reference = new java.util.Date(0); Schema schema = Time.builder().optional().defaultValue(reference).schema(); String msg = "{ \"schema\": { \"type\": \"int32\", \"name\": \"org.apache.kafka.connect.data.Time\", \"version\": 1, \"optional\": true, \"default\": 0 }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
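The exception message in toConnectData points at a schemaless mode; the sketch below illustrates that path, assuming the converter lives in org.apache.kafka.connect.json and that setting schemas.enable=false lets plain JSON through without the schema/payload envelope. The topic name and field are placeholders.

// Sketch: converting plain JSON with schemas disabled.
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

public class SchemalessJsonSketch {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // Configure as a value converter (isKey = false) with the schema envelope disabled.
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);
        SchemaAndValue result = converter.toConnectData(
                "my-topic", "{\"field\": 42}".getBytes(StandardCharsets.UTF_8));
        System.out.println(result.schema()); // null: no schema attached to plain JSON
        System.out.println(result.value());  // {field=42}
    }
}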
@Test public void testNotCoordinator() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); time.sleep(sessionTimeoutMs); RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); assertEquals(1, consumerClient.pendingRequestCount()); assertFalse(future.isDone()); client.prepareResponse(heartbeatResponse(Errors.NOT_COORDINATOR)); time.sleep(sessionTimeoutMs); consumerClient.poll(0); assertTrue(future.isDone()); assertTrue(future.failed()); assertEquals(Errors.NOT_COORDINATOR.exception(), future.exception()); assertTrue(coordinator.coordinatorUnknown()); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
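The heartbeat tests in the surrounding records poll an asynchronous request future until it completes and then inspect its error. As a rough, illustrative analogue only (it uses the JDK's CompletableFuture rather than the client's internal RequestFuture, and the error string is just a placeholder), the failure path exercised by testNotCoordinator looks roughly like this: the future completes exceptionally and the coordinator is marked unknown so it can be rediscovered.

import java.util.concurrent.CompletableFuture;

public class HeartbeatFutureSketch {
    public static void main(String[] args) {
        CompletableFuture<Void> heartbeat = new CompletableFuture<>();
        final boolean[] coordinatorUnknown = {false};

        heartbeat.whenComplete((value, error) -> {
            if (error != null)
                coordinatorUnknown[0] = true;   // failed heartbeat -> rediscover the coordinator
        });

        // Stand-in for the broker answering the heartbeat with an error.
        heartbeat.completeExceptionally(new RuntimeException("NOT_COORDINATOR"));
        System.out.println("coordinator unknown: " + coordinatorUnknown[0]);
    }
}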
@Test public void testCoordinatorDisconnect() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); time.sleep(sessionTimeoutMs); RequestFuture<Void> future = coordinator.sendHeartbeatRequest(); assertEquals(1, consumerClient.pendingRequestCount()); assertFalse(future.isDone()); client.prepareResponse(heartbeatResponse(Errors.NONE), true); time.sleep(sessionTimeoutMs); consumerClient.poll(0); assertTrue(future.isDone()); assertTrue(future.failed()); assertTrue(future.exception() instanceof DisconnectException); assertTrue(coordinator.coordinatorUnknown()); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test(expected = ApiException.class) public void testJoinGroupInvalidGroupId() { final String consumerId = "leader"; subscriptions.subscribe(singleton(topic1), rebalanceListener); metadata.setTopics(singletonList(topic1)); metadata.update(cluster, Collections.<String>emptySet(), time.milliseconds()); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupLeaderResponse(0, consumerId, Collections.<String, List<String>>emptyMap(), Errors.INVALID_GROUP_ID)); coordinator.poll(time.milliseconds(), Long.MAX_VALUE); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testNormalJoinGroupFollower() { final String consumerId = "consumer"; subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE)); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { SyncGroupRequest sync = (SyncGroupRequest) body; return sync.memberId().equals(consumerId) && sync.generationId() == 1 && sync.groupAssignment().isEmpty(); } }, syncGroupResponse(singletonList(t1p), Errors.NONE)); coordinator.joinGroupIfNeeded(); assertFalse(coordinator.needRejoin()); assertEquals(singleton(t1p), subscriptions.assignedPartitions()); assertEquals(singleton(topic1), subscriptions.groupSubscription()); assertEquals(1, rebalanceListener.revokedCount); assertEquals(Collections.emptySet(), rebalanceListener.revoked); assertEquals(1, rebalanceListener.assignedCount); assertEquals(singleton(t1p), rebalanceListener.assigned); }
@Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
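One of the conditions needRejoin() checks above is whether the subscription has changed since the member last joined. A small self-contained sketch of that comparison (the helper and topic names are made up for illustration; only the set-equality idea is taken from the method above):

import java.util.HashSet;
import java.util.Set;

public class NeedRejoinSketch {
    // Stand-in for the joinedSubscription / current subscription comparison.
    static boolean subscriptionChanged(Set<String> joinedSubscription, Set<String> currentSubscription) {
        return joinedSubscription != null && !joinedSubscription.equals(currentSubscription);
    }

    public static void main(String[] args) {
        Set<String> joined = new HashSet<>();
        joined.add("topic1");

        Set<String> current = new HashSet<>();
        current.add("topic1");
        current.add("topic2");   // e.g. a pattern subscription matched a newly created topic

        System.out.println(subscriptionChanged(joined, current));   // true -> rejoin the group
    }
}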
@Test public void testPatternJoinGroupFollower() { final String consumerId = "consumer"; subscriptions.subscribe(Pattern.compile("test.*"), rebalanceListener); metadata.setTopics(singletonList(topic1)); metadata.update(TestUtils.singletonCluster(topic1, 1), Collections.<String>emptySet(), time.milliseconds()); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE)); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { SyncGroupRequest sync = (SyncGroupRequest) body; return sync.memberId().equals(consumerId) && sync.generationId() == 1 && sync.groupAssignment().isEmpty(); } }, syncGroupResponse(Arrays.asList(t1p, t2p), Errors.NONE)); client.prepareMetadataUpdate(cluster, Collections.<String>emptySet()); coordinator.joinGroupIfNeeded(); assertFalse(coordinator.needRejoin()); assertEquals(2, subscriptions.assignedPartitions().size()); assertEquals(2, subscriptions.subscription().size()); assertEquals(1, rebalanceListener.revokedCount); assertEquals(1, rebalanceListener.assignedCount); assertEquals(2, rebalanceListener.assigned.size()); }
@Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
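testPatternJoinGroupFollower above subscribes with the regex "test.*" and relies on topics from cluster metadata being matched against it. A minimal, standalone sketch of that matching step (topic names are invented; only the regex is taken from the test):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Pattern;

public class PatternSubscriptionSketch {
    public static void main(String[] args) {
        Pattern subscription = Pattern.compile("test.*");
        List<String> clusterTopics = Arrays.asList("test1", "test2", "other");

        // Keep only the topics whose names match the subscription pattern.
        List<String> matched = new ArrayList<>();
        for (String topic : clusterTopics)
            if (subscription.matcher(topic).matches())
                matched.add(topic);

        System.out.println(matched);   // [test1, test2]
    }
}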
@Test public void testLeaveGroupOnClose() { final String consumerId = "consumer"; subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE)); coordinator.joinGroupIfNeeded(); final AtomicBoolean received = new AtomicBoolean(false); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { received.set(true); LeaveGroupRequest leaveRequest = (LeaveGroupRequest) body; return leaveRequest.memberId().equals(consumerId) && leaveRequest.groupId().equals(groupId); } }, new LeaveGroupResponse(Errors.NONE)); coordinator.close(0); assertTrue(received.get()); }
public void close(long timeoutMs) { client.disableWakeups(); long now = time.milliseconds(); long endTimeMs = now + timeoutMs; try { maybeAutoCommitOffsetsSync(timeoutMs); now = time.milliseconds(); if (pendingAsyncCommits.get() > 0 && endTimeMs > now) { ensureCoordinatorReady(now, endTimeMs - now); now = time.milliseconds(); } } finally { super.close(Math.max(0, endTimeMs - now)); } }
ConsumerCoordinator extends AbstractCoordinator { public void close(long timeoutMs) { client.disableWakeups(); long now = time.milliseconds(); long endTimeMs = now + timeoutMs; try { maybeAutoCommitOffsetsSync(timeoutMs); now = time.milliseconds(); if (pendingAsyncCommits.get() > 0 && endTimeMs > now) { ensureCoordinatorReady(now, endTimeMs - now); now = time.milliseconds(); } } finally { super.close(Math.max(0, endTimeMs - now)); } } }
ConsumerCoordinator extends AbstractCoordinator { public void close(long timeoutMs) { client.disableWakeups(); long now = time.milliseconds(); long endTimeMs = now + timeoutMs; try { maybeAutoCommitOffsetsSync(timeoutMs); now = time.milliseconds(); if (pendingAsyncCommits.get() > 0 && endTimeMs > now) { ensureCoordinatorReady(now, endTimeMs - now); now = time.milliseconds(); } } finally { super.close(Math.max(0, endTimeMs - now)); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void close(long timeoutMs) { client.disableWakeups(); long now = time.milliseconds(); long endTimeMs = now + timeoutMs; try { maybeAutoCommitOffsetsSync(timeoutMs); now = time.milliseconds(); if (pendingAsyncCommits.get() > 0 && endTimeMs > now) { ensureCoordinatorReady(now, endTimeMs - now); now = time.milliseconds(); } } finally { super.close(Math.max(0, endTimeMs - now)); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void close(long timeoutMs) { client.disableWakeups(); long now = time.milliseconds(); long endTimeMs = now + timeoutMs; try { maybeAutoCommitOffsetsSync(timeoutMs); now = time.milliseconds(); if (pendingAsyncCommits.get() > 0 && endTimeMs > now) { ensureCoordinatorReady(now, endTimeMs - now); now = time.milliseconds(); } } finally { super.close(Math.max(0, endTimeMs - now)); } } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
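close(long timeoutMs) above spreads a single time budget over several blocking steps by recomputing "now" after each one and passing max(0, endTimeMs - now) to the next. A tiny runnable sketch of that pattern (Thread.sleep stands in for the real blocking calls; the numbers are arbitrary):

public class TimeBudgetSketch {
    public static void main(String[] args) throws InterruptedException {
        long timeoutMs = 50;
        long now = System.currentTimeMillis();
        long endTimeMs = now + timeoutMs;

        Thread.sleep(20);                        // first blocking step (e.g. a sync auto-commit)
        now = System.currentTimeMillis();        // refresh "now" after blocking

        long remaining = Math.max(0, endTimeMs - now);
        System.out.println("budget left for the final step: " + remaining + " ms");
    }
}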
@Test public void testUnknownMemberIdOnSyncGroup() { final String consumerId = "consumer"; subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(Collections.<TopicPartition>emptyList(), Errors.UNKNOWN_MEMBER_ID)); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { JoinGroupRequest joinRequest = (JoinGroupRequest) body; return joinRequest.memberId().equals(JoinGroupRequest.UNKNOWN_MEMBER_ID); } }, joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE)); coordinator.joinGroupIfNeeded(); assertFalse(coordinator.needRejoin()); assertEquals(singleton(t1p), subscriptions.assignedPartitions()); }
@Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testRebalanceInProgressOnSyncGroup() { final String consumerId = "consumer"; subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(Collections.<TopicPartition>emptyList(), Errors.REBALANCE_IN_PROGRESS)); client.prepareResponse(joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE)); coordinator.joinGroupIfNeeded(); assertFalse(coordinator.needRejoin()); assertEquals(singleton(t1p), subscriptions.assignedPartitions()); }
@Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testIllegalGenerationOnSyncGroup() { final String consumerId = "consumer"; subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(Collections.<TopicPartition>emptyList(), Errors.ILLEGAL_GENERATION)); client.prepareResponse(new MockClient.RequestMatcher() { @Override public boolean matches(AbstractRequest body) { JoinGroupRequest joinRequest = (JoinGroupRequest) body; return joinRequest.memberId().equals(JoinGroupRequest.UNKNOWN_MEMBER_ID); } }, joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE)); coordinator.joinGroupIfNeeded(); assertFalse(coordinator.needRejoin()); assertEquals(singleton(t1p), subscriptions.assignedPartitions()); }
@Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
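The three preceding SyncGroup tests drive different error codes and then expect the member to rejoin; for UNKNOWN_MEMBER_ID and ILLEGAL_GENERATION the request matchers additionally check that the next JoinGroup is sent with the member id reset. The decision table below is only an illustrative summary of what those tests assert (the enum and helper names are invented, and the "no reset on REBALANCE_IN_PROGRESS" row is an assumption the tests do not verify):

public class SyncGroupRetrySketch {
    enum SyncError { NONE, UNKNOWN_MEMBER_ID, ILLEGAL_GENERATION, REBALANCE_IN_PROGRESS }

    // Any non-NONE error forces the member back through JoinGroup/SyncGroup.
    static boolean mustRejoin(SyncError error) {
        return error != SyncError.NONE;
    }

    // The tests above only check a reset member id for these two errors;
    // treating REBALANCE_IN_PROGRESS as "no reset" is an assumption here.
    static boolean resetMemberId(SyncError error) {
        return error == SyncError.UNKNOWN_MEMBER_ID || error == SyncError.ILLEGAL_GENERATION;
    }

    public static void main(String[] args) {
        for (SyncError e : SyncError.values())
            System.out.println(e + " -> rejoin=" + mustRejoin(e) + ", resetMemberId=" + resetMemberId(e));
    }
}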
@Test public void testDisconnectInJoin() { subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, "consumer", "leader", Errors.NONE), true); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(joinGroupFollowerResponse(1, "consumer", "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE)); coordinator.joinGroupIfNeeded(); assertFalse(coordinator.needRejoin()); assertEquals(singleton(t1p), subscriptions.assignedPartitions()); assertEquals(1, rebalanceListener.revokedCount); assertEquals(1, rebalanceListener.assignedCount); assertEquals(singleton(t1p), rebalanceListener.assigned); }
@Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { @Override public boolean needRejoin() { if (!subscriptions.partitionsAutoAssigned()) return false; if (assignmentSnapshot != null && !assignmentSnapshot.equals(metadataSnapshot)) return true; if (joinedSubscription != null && !joinedSubscription.equals(subscriptions.subscription())) return true; return super.needRejoin(); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
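The needRejoin() override in the rows above forces a fresh JoinGroup whenever the cached metadata snapshot or the subscription used for the last join no longer matches the current state, and testDisconnectInJoin checks that a disconnected JoinGroup response is simply retried after the coordinator is rediscovered. The following is a minimal sketch, not taken from the test suite, of how the same rejoin behaviour looks through the public consumer API; the bootstrap address, group id, and topic names are illustrative assumptions.

// Sketch only: a subscription change makes joinedSubscription differ from the current
// subscription, so the next poll() rejoins the group and the rebalance listener fires again.
import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Collection;
import java.util.Collections;
import java.util.Properties;

public class RejoinSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed address
        props.put("group.id", "rejoin-sketch");           // assumed group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        ConsumerRebalanceListener listener = new ConsumerRebalanceListener() {
            @Override
            public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
                System.out.println("revoked: " + partitions);
            }
            @Override
            public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
                System.out.println("assigned: " + partitions);
            }
        };

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("topic1"), listener);
            consumer.poll(1000); // joins the group; onPartitionsAssigned fires
            // Changing the subscription makes needRejoin() return true on the next poll,
            // so the group is rejoined and the listener callbacks fire again.
            consumer.subscribe(Collections.singleton("topic2"), listener);
            consumer.poll(1000);
        }
    }
}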
@Test public void timestampToConnect() { Schema schema = Timestamp.SCHEMA; GregorianCalendar calendar = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0); calendar.setTimeZone(TimeZone.getTimeZone("UTC")); calendar.add(Calendar.MILLISECOND, 2000000000); calendar.add(Calendar.MILLISECOND, 2000000000); java.util.Date reference = calendar.getTime(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1 }, \"payload\": 4000000000 }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); java.util.Date converted = (java.util.Date) schemaAndValue.value(); assertEquals(schema, schemaAndValue.schema()); assertEquals(reference, converted); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
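The timestampToConnect row exercises the org.apache.kafka.connect.data.Timestamp logical type: the converter reads the schema/payload envelope and hands back a java.util.Date. Below is a minimal round-trip sketch that uses only the converter methods listed above (configure, fromConnectData, toConnectData); the topic name is an assumption and the default schemas.enable=true behaviour is assumed.

// Hedged sketch: round-trips a Connect Timestamp value through JsonConverter so the
// serialized form carries the schema/payload envelope seen in the test messages above.
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.data.Timestamp;
import org.apache.kafka.connect.json.JsonConverter;

import java.util.Collections;
import java.util.Date;

public class TimestampRoundTrip {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        converter.configure(Collections.<String, Object>emptyMap(), false); // value converter, defaults

        Date reference = new Date(4_000_000_000L); // same instant as the test payload
        byte[] serialized = converter.fromConnectData("test-topic", Timestamp.SCHEMA, reference);

        SchemaAndValue back = converter.toConnectData("test-topic", serialized);
        System.out.println(back.schema());        // Timestamp logical schema
        System.out.println((Date) back.value());  // same instant as 'reference'
    }
}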
@Test public void testAutoCommitDynamicAssignment() { final String consumerId = "consumer"; ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true); subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE)); coordinator.joinGroupIfNeeded(); subscriptions.seek(t1p, 100); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); time.sleep(autoCommitIntervalMs); coordinator.poll(time.milliseconds(), Long.MAX_VALUE); assertEquals(100L, subscriptions.committed(t1p).offset()); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
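testAutoCommitDynamicAssignment relies on the poll() method shown above: once autoCommitIntervalMs has elapsed, maybeAutoCommitOffsetsAsync(now) sends the offset commit as part of the normal poll loop, which is what the test simulates with time.sleep(autoCommitIntervalMs). Through the public API this is just enable.auto.commit plus regular poll() calls; the sketch below is illustrative only, with an assumed broker address, group id, and topic.

// Sketch: with enable.auto.commit=true, each poll() call runs the coordinator poll()
// shown above, which commits the consumed offsets asynchronously on the interval.
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

import java.util.Collections;
import java.util.Properties;

public class AutoCommitSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // assumed
        props.put("group.id", "auto-commit-sketch");        // assumed
        props.put("enable.auto.commit", "true");
        props.put("auto.commit.interval.ms", "5000");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singleton("topic1"));
            for (int i = 0; i < 10; i++) {
                ConsumerRecords<String, String> records = consumer.poll(1000);
                records.forEach(r -> System.out.println(r.offset() + ": " + r.value()));
                // No explicit commit: the coordinator auto-commits once the interval elapses.
            }
        }
    }
}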
@Test public void testAutoCommitDynamicAssignmentRebalance() { final String consumerId = "consumer"; ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true); subscriptions.subscribe(singleton(topic1), rebalanceListener); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); time.sleep(autoCommitIntervalMs); consumerClient.poll(0); client.prepareResponse(joinGroupFollowerResponse(1, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(singletonList(t1p), Errors.NONE)); coordinator.joinGroupIfNeeded(); subscriptions.seek(t1p, 100); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); time.sleep(autoCommitIntervalMs); coordinator.poll(time.milliseconds(), Long.MAX_VALUE); assertEquals(100L, subscriptions.committed(t1p).offset()); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testAutoCommitManualAssignment() { ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true); subscriptions.assignFromUser(singleton(t1p)); subscriptions.seek(t1p, 100); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); time.sleep(autoCommitIntervalMs); coordinator.poll(time.milliseconds(), Long.MAX_VALUE); assertEquals(100L, subscriptions.committed(t1p).offset()); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
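testAutoCommitManualAssignment covers the other branch of poll(): with user-assigned partitions, partitionsAutoAssigned() is false, so no group join or rebalance happens, yet offsets are still auto-committed to the coordinator for group.id on the interval. A hedged public-API sketch of the same setup, with assumed connection details, follows.

// Sketch: manual assignment plus auto-commit. No JoinGroup is sent, but committed
// offsets still go to the group coordinator identified by group.id.
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Properties;

public class ManualAssignAutoCommitSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // assumed
        props.put("group.id", "manual-assign-sketch");      // still needed for offset commits
        props.put("enable.auto.commit", "true");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition t1p = new TopicPartition("topic1", 0);
            consumer.assign(Collections.singleton(t1p));
            consumer.seek(t1p, 100L);  // mirrors subscriptions.seek(t1p, 100) in the test
            consumer.poll(1000);       // auto-commits offset 100 once the interval elapses
        }
    }
}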
@Test public void testAutoCommitManualAssignmentCoordinatorUnknown() { ConsumerCoordinator coordinator = buildCoordinator(new Metrics(), assignors, ConsumerConfig.DEFAULT_EXCLUDE_INTERNAL_TOPICS, true, true); subscriptions.assignFromUser(singleton(t1p)); subscriptions.seek(t1p, 100); consumerClient.poll(0); time.sleep(autoCommitIntervalMs); consumerClient.poll(0); assertNull(subscriptions.committed(t1p)); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); time.sleep(retryBackoffMs); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); coordinator.poll(time.milliseconds(), Long.MAX_VALUE); assertEquals(100L, subscriptions.committed(t1p).offset()); }
public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public void poll(long now, long remainingMs) { invokeCompletedOffsetCommitCallbacks(); if (subscriptions.partitionsAutoAssigned()) { if (coordinatorUnknown()) { ensureCoordinatorReady(); now = time.milliseconds(); } if (needRejoin()) { if (subscriptions.hasPatternSubscription()) client.ensureFreshMetadata(); ensureActiveGroup(); now = time.milliseconds(); } } else { if (metadata.updateRequested() && !client.hasReadyNodes()) { boolean metadataUpdated = client.awaitMetadataUpdate(remainingMs); if (!metadataUpdated && !client.hasReadyNodes()) return; now = time.milliseconds(); } } pollHeartbeat(now); maybeAutoCommitOffsetsAsync(now); } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testCommitOffsetSyncNotCoordinator() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NOT_COORDINATOR))); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
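The commitOffsetsSync loop above keeps retrying retriable failures until the caller's timeout expires: testCommitOffsetSyncNotCoordinator answers the first commit with NOT_COORDINATOR, lets the coordinator be rediscovered, and expects the second attempt to succeed. From the application's side this is a single blocking commitSync call; the sketch below is illustrative, with assumed connection details and partition.

// Sketch: a blocking commit. Retriable errors such as NOT_COORDINATOR are handled by
// the internal retry loop above; only non-retriable failures surface as exceptions.
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;

import java.util.Collections;
import java.util.Properties;

public class CommitSyncSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");  // assumed
        props.put("group.id", "commit-sync-sketch");        // assumed
        props.put("enable.auto.commit", "false");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition t1p = new TopicPartition("topic1", 0);
            consumer.assign(Collections.singleton(t1p));
            try {
                consumer.commitSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)));
            } catch (KafkaException fatal) {
                // Non-retriable outcomes, such as UNKNOWN_TOPIC_OR_PARTITION in the later
                // testCommitUnknownTopicOrPartition row, are rethrown rather than retried.
                fatal.printStackTrace();
            }
        }
    }
}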
@Test public void testCommitOffsetSyncCoordinatorNotAvailable() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.COORDINATOR_NOT_AVAILABLE))); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test public void testCommitOffsetSyncCoordinatorDisconnected() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE)), true); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.NONE))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L)), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test(expected = KafkaException.class) public void testCommitUnknownTopicOrPartition() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.UNKNOWN_TOPIC_OR_PARTITION))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
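The row above drives commitOffsetsSync into a non-retriable UNKNOWN_TOPIC_OR_PARTITION error, which the loop rethrows instead of retrying. For context, here is a minimal sketch of how an application would reach the same path through the public consumer API; the broker address, group id, topic name, and class name are illustrative assumptions, not taken from the tests.

import java.util.Collections;
import java.util.Properties;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.KafkaException;
import org.apache.kafka.common.TopicPartition;

public class SyncCommitExample {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092"); // assumed local broker
        props.put("group.id", "example-group");           // hypothetical group id
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition tp = new TopicPartition("test", 0); // hypothetical topic/partition
            consumer.assign(Collections.singleton(tp));
            try {
                // Synchronous commit: retriable errors are retried internally, while
                // non-retriable ones (e.g. unknown topic or partition) are thrown here,
                // matching the expectation in testCommitUnknownTopicOrPartition above.
                consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(100L, "metadata")));
            } catch (KafkaException e) {
                System.err.println("Commit failed with a non-retriable error: " + e);
            }
        }
    }
}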
@Test(expected = OffsetMetadataTooLarge.class) public void testCommitOffsetMetadataTooLarge() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.OFFSET_METADATA_TOO_LARGE))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
@Test(expected = CommitFailedException.class) public void testCommitOffsetIllegalGeneration() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(); client.prepareResponse(offsetCommitResponse(Collections.singletonMap(t1p, Errors.ILLEGAL_GENERATION))); coordinator.commitOffsetsSync(Collections.singletonMap(t1p, new OffsetAndMetadata(100L, "metadata")), Long.MAX_VALUE); }
public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
ConsumerCoordinator extends AbstractCoordinator { public boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs) { invokeCompletedOffsetCommitCallbacks(); if (offsets.isEmpty()) return true; long now = time.milliseconds(); long startMs = now; long remainingMs = timeoutMs; do { if (coordinatorUnknown()) { if (!ensureCoordinatorReady(now, remainingMs)) return false; remainingMs = timeoutMs - (time.milliseconds() - startMs); } RequestFuture<Void> future = sendOffsetCommitRequest(offsets); client.poll(future, remainingMs); if (future.succeeded()) { if (interceptors != null) interceptors.onCommit(offsets); return true; } if (!future.isRetriable()) throw future.exception(); time.sleep(retryBackoffMs); now = time.milliseconds(); remainingMs = timeoutMs - (now - startMs); } while (remainingMs > 0); return false; } ConsumerCoordinator(ConsumerNetworkClient client, String groupId, int rebalanceTimeoutMs, int sessionTimeoutMs, int heartbeatIntervalMs, List<PartitionAssignor> assignors, Metadata metadata, SubscriptionState subscriptions, Metrics metrics, String metricGrpPrefix, Time time, long retryBackoffMs, boolean autoCommitEnabled, int autoCommitIntervalMs, ConsumerInterceptors<?, ?> interceptors, boolean excludeInternalTopics, final boolean leaveGroupOnClose); @Override String protocolType(); @Override List<ProtocolMetadata> metadata(); void updatePatternSubscription(Cluster cluster); void poll(long now, long remainingMs); long timeToNextPoll(long now); @Override boolean needRejoin(); void refreshCommittedOffsetsIfNeeded(); Map<TopicPartition, OffsetAndMetadata> fetchCommittedOffsets(Set<TopicPartition> partitions); void close(long timeoutMs); void commitOffsetsAsync(final Map<TopicPartition, OffsetAndMetadata> offsets, final OffsetCommitCallback callback); boolean commitOffsetsSync(Map<TopicPartition, OffsetAndMetadata> offsets, long timeoutMs); void maybeAutoCommitOffsetsNow(); }
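The ILLEGAL_GENERATION row, like the two commit-error rows before it, ultimately exercises the same deadline-bounded retry loop inside commitOffsetsSync. The standalone sketch below isolates that pattern (retry with a fixed backoff until success or the caller's timeout budget is exhausted); it uses no Kafka classes and all names are illustrative.

public final class BoundedRetry {

    @FunctionalInterface
    public interface Attempt {
        boolean run() throws Exception; // true = success, false = retriable failure
    }

    public static boolean retryUntilTimeout(Attempt attempt, long timeoutMs, long backoffMs)
            throws Exception {
        long startMs = System.currentTimeMillis();
        long remainingMs = timeoutMs;
        do {
            if (attempt.run())
                return true;                 // success within the budget
            Thread.sleep(backoffMs);         // back off before the next attempt
            remainingMs = timeoutMs - (System.currentTimeMillis() - startMs);
        } while (remainingMs > 0);
        return false;                        // budget exhausted, mirroring the final 'return false' above
    }

    public static void main(String[] args) throws Exception {
        // Simulated flaky operation: succeeds roughly 30% of the time.
        boolean committed = retryUntilTimeout(() -> Math.random() > 0.7, 5_000L, 100L);
        System.out.println("committed = " + committed);
    }
}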
@Test public void timestampToConnectOptional() { Schema schema = Timestamp.builder().optional().schema(); String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\", \"version\": 1, \"optional\": true }, \"payload\": null }"; SchemaAndValue schemaAndValue = converter.toConnectData(TOPIC, msg.getBytes()); assertEquals(schema, schemaAndValue.schema()); assertNull(schemaAndValue.value()); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
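As a usage-level counterpart to the timestampToConnectOptional row above, the sketch below feeds the same envelope through a standalone JsonConverter using the configure and toConnectData methods listed in the class outline. The topic name is arbitrary, and the empty config map assumes the default configuration leaves schemas enabled.

import java.util.Collections;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

public class TimestampEnvelopeExample {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // Assumes the default configuration keeps schemas enabled, so the
        // "schema"/"payload" envelope below is required.
        converter.configure(Collections.<String, Object>emptyMap(), false);

        // Same envelope shape as the timestampToConnectOptional test above: an optional
        // logical Timestamp schema carrying a null payload.
        String msg = "{ \"schema\": { \"type\": \"int64\", \"name\": \"org.apache.kafka.connect.data.Timestamp\","
                + " \"version\": 1, \"optional\": true }, \"payload\": null }";

        SchemaAndValue result = converter.toConnectData("my-topic", msg.getBytes());
        System.out.println(result.schema() + " -> " + result.value()); // value is expected to be null
    }
}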
@Test public void byteToConnect() { assertEquals(new SchemaAndValue(Schema.INT8_SCHEMA, (byte) 12), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"int8\" }, \"payload\": 12 }".getBytes())); }
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
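The byteToConnect row relies on the schema'd envelope, while the error message in toConnectData points to schemas.enable=false for plain JSON. A minimal sketch of that schemaless path is shown below; the topic name and payload are illustrative, and the config key comes straight from the error message above.

import java.util.Collections;
import org.apache.kafka.connect.data.SchemaAndValue;
import org.apache.kafka.connect.json.JsonConverter;

public class SchemalessJsonExample {
    public static void main(String[] args) {
        JsonConverter converter = new JsonConverter();
        // schemas.enable=false takes the !enableSchemas branch shown above: the converter
        // wraps the plain JSON in a null-schema envelope itself, so no "schema"/"payload"
        // fields are required in the input.
        converter.configure(Collections.singletonMap("schemas.enable", "false"), false);

        SchemaAndValue result = converter.toConnectData("my-topic", "{ \"answer\": 42 }".getBytes());
        System.out.println(result.schema()); // null: schemaless data
        System.out.println(result.value());  // a map-like view of the JSON object
    }
}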