target
stringlengths 20
113k
| src_fm
stringlengths 11
86.3k
| src_fm_fc
stringlengths 21
86.4k
| src_fm_fc_co
stringlengths 30
86.4k
| src_fm_fc_ms
stringlengths 42
86.8k
| src_fm_fc_ms_ff
stringlengths 43
86.8k
|
---|---|---|---|---|---|
@Test public void deserializeArpWithTwoVlanTest() throws PacketParsingException { short innerVlan = 1234; short outerVlan = 2345; Ethernet ethernet = buildEthernet(srcAndDstMacAddresses, ethTypeToByteArray(EthType.VLAN_FRAME), shortToByteArray(outerVlan), ethTypeToByteArray(EthType.VLAN_FRAME), shortToByteArray(innerVlan), ethTypeToByteArray(EthType.ARP), arpPacket); ArpPacketData data = service.deserializeArp(ethernet, null, 0); assertNotNull(data); assertEquals(2, data.getVlans().size()); assertEquals(Integer.valueOf(outerVlan), data.getVlans().get(0)); assertEquals(Integer.valueOf(innerVlan), data.getVlans().get(1)); assertEquals(buildArpPacket(arpPacket), data.getArp()); }
|
@VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; }
|
ConnectedDevicesService implements IService, IInputTranslator { @VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; } }
|
ConnectedDevicesService implements IService, IInputTranslator { @VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; } }
|
ConnectedDevicesService implements IService, IInputTranslator { @VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; } @Override Command makeCommand(CommandContext context, OfInput input); @Override void setup(FloodlightModuleContext context); }
|
ConnectedDevicesService implements IService, IInputTranslator { @VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; } @Override Command makeCommand(CommandContext context, OfInput input); @Override void setup(FloodlightModuleContext context); }
|
@Test public void deserializeArpWithQinQVlansTest() throws PacketParsingException { short vlan1 = 1234; short vlan2 = 2345; short vlan3 = 4000; Ethernet ethernet = buildEthernet(srcAndDstMacAddresses, ethTypeToByteArray(EthType.Q_IN_Q), shortToByteArray(vlan1), ethTypeToByteArray(EthType.BRIDGING), shortToByteArray(vlan2), ethTypeToByteArray(EthType.VLAN_FRAME), shortToByteArray(vlan3), ethTypeToByteArray(EthType.ARP), arpPacket); ArpPacketData data = service.deserializeArp(ethernet, null, 0); assertNotNull(data); assertEquals(3, data.getVlans().size()); assertEquals(Integer.valueOf(vlan1), data.getVlans().get(0)); assertEquals(Integer.valueOf(vlan2), data.getVlans().get(1)); assertEquals(Integer.valueOf(vlan3), data.getVlans().get(2)); assertEquals(buildArpPacket(arpPacket), data.getArp()); }
|
@VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; }
|
ConnectedDevicesService implements IService, IInputTranslator { @VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; } }
|
ConnectedDevicesService implements IService, IInputTranslator { @VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; } }
|
ConnectedDevicesService implements IService, IInputTranslator { @VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; } @Override Command makeCommand(CommandContext context, OfInput input); @Override void setup(FloodlightModuleContext context); }
|
ConnectedDevicesService implements IService, IInputTranslator { @VisibleForTesting ArpPacketData deserializeArp(Ethernet eth, SwitchId switchId, long cookie) { try { List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(eth, vlans); if (payload instanceof ARP) { return new ArpPacketData((ARP) payload, vlans); } } catch (Exception exception) { logger.info("Could not deserialize ARP packet {} on switch {}. Cookie {}. Deserialization failure: {}", eth, switchId, Cookie.toString(cookie), exception.getMessage(), exception); return null; } logger.info("Got invalid ARP packet: {} on switch {}. Cookie {}", eth, switchId, cookie); return null; } @Override Command makeCommand(CommandContext context, OfInput input); @Override void setup(FloodlightModuleContext context); }
|
@Test public void shouldCreateConfigFromContextParameters() { FloodlightModuleContext context = new FloodlightModuleContext(); IFloodlightModule module = niceMock(IFloodlightModule.class); context.addConfigParam(module, "bootstrap-servers", TEST_BOOTSTRAP_SERVERS); FloodlightModuleConfigurationProvider provider = FloodlightModuleConfigurationProvider.of(context, module); KafkaChannelConfig kafkaConfig = provider.getConfiguration(KafkaChannelConfig.class); assertEquals(TEST_BOOTSTRAP_SERVERS, kafkaConfig.getBootstrapServers()); }
|
public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
IFloodlightModule module); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
Class<? extends IFloodlightModule> module); }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
IFloodlightModule module); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
Class<? extends IFloodlightModule> module); }
|
@Test public void shouldCreateEnvConfigFromContextParameters() { FloodlightModuleContext context = new FloodlightModuleContext(); IFloodlightModule module = niceMock(IFloodlightModule.class); context.addConfigParam(module, "environment-naming-prefix", TEST_PREFIX); FloodlightModuleConfigurationProvider provider = FloodlightModuleConfigurationProvider.of(context, module); EnvironmentFloodlightConfig environmentConfig = provider.getConfiguration(EnvironmentFloodlightConfig.class); assertEquals(TEST_PREFIX, environmentConfig.getNamingPrefix()); }
|
public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
IFloodlightModule module); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
Class<? extends IFloodlightModule> module); }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
IFloodlightModule module); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
Class<? extends IFloodlightModule> module); }
|
@Test public void shouldCreateConfigWithEnvPrefix() { FloodlightModuleContext context = new FloodlightModuleContext(); IFloodlightModule module = niceMock(IFloodlightModule.class); context.addConfigParam(module, "environment-naming-prefix", TEST_PREFIX); FloodlightModuleConfigurationProvider provider = FloodlightModuleConfigurationProvider.of(context, module); KafkaChannelConfig kafkaConsumerConfig = provider.getConfiguration(KafkaChannelConfig.class); assertEquals(TEST_PREFIX + "_floodlight", kafkaConsumerConfig.getGroupId()); }
|
public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
IFloodlightModule module); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
Class<? extends IFloodlightModule> module); }
|
FloodlightModuleConfigurationProvider extends ValidatingConfigurationProvider { public static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext, IFloodlightModule module) { Map<String, String> configData = moduleContext.getConfigParams(module); FloodlightModuleConfigurationProvider provider = new FloodlightModuleConfigurationProvider(configData); dumpConfigData(module.getClass(), configData); return provider; } protected FloodlightModuleConfigurationProvider(Map<String, String> configData); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
IFloodlightModule module); static FloodlightModuleConfigurationProvider of(FloodlightModuleContext moduleContext,
Class<? extends IFloodlightModule> module); }
|
@Test public void success() throws Exception { final PingService realPingService = new PingService(); moduleContext.addService(PingService.class, realPingService); final ISwitchManager realSwitchManager = new SwitchManager(); moduleContext.addService(ISwitchManager.class, realSwitchManager); InputService inputService = createMock(InputService.class); moduleContext.addService(InputService.class, inputService); inputService.addTranslator(eq(OFType.PACKET_IN), anyObject()); replayAll(); final DatapathId dpIdBeta = DatapathId.of(0x0000fffe000002L); final Ping ping = new Ping(new NetworkEndpoint(new SwitchId(dpIdBeta.getLong()), 8), new NetworkEndpoint(new SwitchId(dpId.getLong()), 9), new FlowTransitEncapsulation(2, FlowEncapsulationType.TRANSIT_VLAN), 3); final PingData payload = PingData.of(ping); moduleContext.addConfigParam(new PathVerificationService(), "hmac256-secret", "secret"); realPingService.setup(moduleContext); byte[] signedPayload = realPingService.getSignature().sign(payload); byte[] wireData = realPingService.wrapData(ping, signedPayload).serialize(); OFFactory ofFactory = new OFFactoryVer13(); OFPacketIn message = ofFactory.buildPacketIn() .setReason(OFPacketInReason.ACTION).setXid(1L) .setCookie(PingService.OF_CATCH_RULE_COOKIE) .setData(wireData) .build(); FloodlightContext metadata = new FloodlightContext(); IPacket decodedEthernet = new Ethernet().deserialize(wireData, 0, wireData.length); Assert.assertTrue(decodedEthernet instanceof Ethernet); IFloodlightProviderService.bcStore.put( metadata, IFloodlightProviderService.CONTEXT_PI_PAYLOAD, (Ethernet) decodedEthernet); OfInput input = new OfInput(iofSwitch, message, metadata); final PingResponseCommand command = makeCommand(input); command.call(); final List<Message> replies = kafkaMessageCatcher.getValues(); Assert.assertEquals(1, replies.size()); InfoMessage response = (InfoMessage) replies.get(0); PingResponse pingResponse = (PingResponse) response.getData(); 
Assert.assertNull(pingResponse.getError()); Assert.assertNotNull(pingResponse.getMeters()); Assert.assertEquals(payload.getPingId(), pingResponse.getPingId()); }
|
@Override public Command call() { log.debug("{} - {}", getClass().getCanonicalName(), input); byte[] payload = unwrap(); if (payload == null) { return null; } log.info("Receive flow ping packet from switch {} OF-xid:{}", input.getDpId(), input.getMessage().getXid()); try { PingData pingData = decode(payload); getContext().setCorrelationId(pingData.getPingId().toString()); process(pingData); } catch (CorruptedNetworkDataException e) { logPing.error(String.format("dpid:%s %s", input.getDpId(), e)); } return null; }
|
PingResponseCommand extends PingCommand { @Override public Command call() { log.debug("{} - {}", getClass().getCanonicalName(), input); byte[] payload = unwrap(); if (payload == null) { return null; } log.info("Receive flow ping packet from switch {} OF-xid:{}", input.getDpId(), input.getMessage().getXid()); try { PingData pingData = decode(payload); getContext().setCorrelationId(pingData.getPingId().toString()); process(pingData); } catch (CorruptedNetworkDataException e) { logPing.error(String.format("dpid:%s %s", input.getDpId(), e)); } return null; } }
|
PingResponseCommand extends PingCommand { @Override public Command call() { log.debug("{} - {}", getClass().getCanonicalName(), input); byte[] payload = unwrap(); if (payload == null) { return null; } log.info("Receive flow ping packet from switch {} OF-xid:{}", input.getDpId(), input.getMessage().getXid()); try { PingData pingData = decode(payload); getContext().setCorrelationId(pingData.getPingId().toString()); process(pingData); } catch (CorruptedNetworkDataException e) { logPing.error(String.format("dpid:%s %s", input.getDpId(), e)); } return null; } PingResponseCommand(CommandContext context, OfInput input); }
|
PingResponseCommand extends PingCommand { @Override public Command call() { log.debug("{} - {}", getClass().getCanonicalName(), input); byte[] payload = unwrap(); if (payload == null) { return null; } log.info("Receive flow ping packet from switch {} OF-xid:{}", input.getDpId(), input.getMessage().getXid()); try { PingData pingData = decode(payload); getContext().setCorrelationId(pingData.getPingId().toString()); process(pingData); } catch (CorruptedNetworkDataException e) { logPing.error(String.format("dpid:%s %s", input.getDpId(), e)); } return null; } PingResponseCommand(CommandContext context, OfInput input); @Override Command call(); }
|
PingResponseCommand extends PingCommand { @Override public Command call() { log.debug("{} - {}", getClass().getCanonicalName(), input); byte[] payload = unwrap(); if (payload == null) { return null; } log.info("Receive flow ping packet from switch {} OF-xid:{}", input.getDpId(), input.getMessage().getXid()); try { PingData pingData = decode(payload); getContext().setCorrelationId(pingData.getPingId().toString()); process(pingData); } catch (CorruptedNetworkDataException e) { logPing.error(String.format("dpid:%s %s", input.getDpId(), e)); } return null; } PingResponseCommand(CommandContext context, OfInput input); @Override Command call(); }
|
@Test public void makeOuterVlanMatchSharedMessage() { final IngressFlowModFactory factory = makeFactory(); final IngressFlowSegmentBase command = factory.getCommand(); final FlowEndpoint endpoint = command.getEndpoint(); RoutingMetadata metadata = RoutingMetadata.builder() .outerVlanId(endpoint.getOuterVlanId()) .build(Collections.emptySet()); OFFlowAdd expected = of.buildFlowAdd() .setTableId(getTargetPreIngressTableId()) .setPriority(FlowSegmentCommand.FLOW_PRIORITY) .setCookie(U64.of( FlowSharedSegmentCookie.builder(SharedSegmentType.QINQ_OUTER_VLAN) .portNumber(endpoint.getPortNumber()) .vlanId(endpoint.getOuterVlanId()) .build().getValue())) .setMatch(OfAdapter.INSTANCE.matchVlanId(of, of.buildMatch(), endpoint.getOuterVlanId()) .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .build()) .setInstructions(ImmutableList.of( of.instructions().applyActions(Collections.singletonList(of.actions().popVlan())), of.instructions().writeMetadata(metadata.getValue(), metadata.getMask()), of.instructions().gotoTable(TableId.of(SwitchManager.INGRESS_TABLE_ID)))) .build(); verifyOfMessageEquals(expected, factory.makeOuterVlanMatchSharedMessage()); }
|
public OFFlowMod makeOuterVlanMatchSharedMessage() { FlowEndpoint endpoint = command.getEndpoint(); FlowSharedSegmentCookie cookie = FlowSharedSegmentCookie.builder(SharedSegmentType.QINQ_OUTER_VLAN) .portNumber(endpoint.getPortNumber()) .vlanId(endpoint.getOuterVlanId()) .build(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.PRE_INGRESS_TABLE_ID)) .setCookie(U64.of(cookie.getValue())) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .setExact(MatchField.VLAN_VID, OFVlanVidMatch.ofVlan(endpoint.getOuterVlanId())) .build()) .setInstructions(makeOuterVlanMatchInstructions()) .build(); }
|
IngressFlowModFactory { public OFFlowMod makeOuterVlanMatchSharedMessage() { FlowEndpoint endpoint = command.getEndpoint(); FlowSharedSegmentCookie cookie = FlowSharedSegmentCookie.builder(SharedSegmentType.QINQ_OUTER_VLAN) .portNumber(endpoint.getPortNumber()) .vlanId(endpoint.getOuterVlanId()) .build(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.PRE_INGRESS_TABLE_ID)) .setCookie(U64.of(cookie.getValue())) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .setExact(MatchField.VLAN_VID, OFVlanVidMatch.ofVlan(endpoint.getOuterVlanId())) .build()) .setInstructions(makeOuterVlanMatchInstructions()) .build(); } }
|
IngressFlowModFactory { public OFFlowMod makeOuterVlanMatchSharedMessage() { FlowEndpoint endpoint = command.getEndpoint(); FlowSharedSegmentCookie cookie = FlowSharedSegmentCookie.builder(SharedSegmentType.QINQ_OUTER_VLAN) .portNumber(endpoint.getPortNumber()) .vlanId(endpoint.getOuterVlanId()) .build(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.PRE_INGRESS_TABLE_ID)) .setCookie(U64.of(cookie.getValue())) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .setExact(MatchField.VLAN_VID, OFVlanVidMatch.ofVlan(endpoint.getOuterVlanId())) .build()) .setInstructions(makeOuterVlanMatchInstructions()) .build(); } IngressFlowModFactory(
OfFlowModBuilderFactory flowModBuilderFactory, IngressFlowSegmentBase command, IOFSwitch sw,
Set<SwitchFeature> features); }
|
IngressFlowModFactory { public OFFlowMod makeOuterVlanMatchSharedMessage() { FlowEndpoint endpoint = command.getEndpoint(); FlowSharedSegmentCookie cookie = FlowSharedSegmentCookie.builder(SharedSegmentType.QINQ_OUTER_VLAN) .portNumber(endpoint.getPortNumber()) .vlanId(endpoint.getOuterVlanId()) .build(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.PRE_INGRESS_TABLE_ID)) .setCookie(U64.of(cookie.getValue())) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .setExact(MatchField.VLAN_VID, OFVlanVidMatch.ofVlan(endpoint.getOuterVlanId())) .build()) .setInstructions(makeOuterVlanMatchInstructions()) .build(); } IngressFlowModFactory(
OfFlowModBuilderFactory flowModBuilderFactory, IngressFlowSegmentBase command, IOFSwitch sw,
Set<SwitchFeature> features); OFFlowMod makeOuterOnlyVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeSingleVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeDoubleVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeDefaultPortForwardMessage(MeterId effectiveMeterId); OFFlowMod makeOuterVlanMatchSharedMessage(); OFFlowMod makeOuterOnlyVlanServer42IngressFlowMessage(int server42UpdPortOffset); OFFlowMod makeDefaultPortServer42IngressFlowMessage(int server42UpdPortOffset); OFFlowMod makeCustomerPortSharedCatchMessage(); OFFlowMod makeLldpInputCustomerFlowMessage(); OFFlowMod makeArpInputCustomerFlowMessage(); Optional<OFFlowMod> makeServer42InputFlowMessage(int server42UpdPortOffset); }
|
IngressFlowModFactory { public OFFlowMod makeOuterVlanMatchSharedMessage() { FlowEndpoint endpoint = command.getEndpoint(); FlowSharedSegmentCookie cookie = FlowSharedSegmentCookie.builder(SharedSegmentType.QINQ_OUTER_VLAN) .portNumber(endpoint.getPortNumber()) .vlanId(endpoint.getOuterVlanId()) .build(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.PRE_INGRESS_TABLE_ID)) .setCookie(U64.of(cookie.getValue())) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .setExact(MatchField.VLAN_VID, OFVlanVidMatch.ofVlan(endpoint.getOuterVlanId())) .build()) .setInstructions(makeOuterVlanMatchInstructions()) .build(); } IngressFlowModFactory(
OfFlowModBuilderFactory flowModBuilderFactory, IngressFlowSegmentBase command, IOFSwitch sw,
Set<SwitchFeature> features); OFFlowMod makeOuterOnlyVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeSingleVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeDoubleVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeDefaultPortForwardMessage(MeterId effectiveMeterId); OFFlowMod makeOuterVlanMatchSharedMessage(); OFFlowMod makeOuterOnlyVlanServer42IngressFlowMessage(int server42UpdPortOffset); OFFlowMod makeDefaultPortServer42IngressFlowMessage(int server42UpdPortOffset); OFFlowMod makeCustomerPortSharedCatchMessage(); OFFlowMod makeLldpInputCustomerFlowMessage(); OFFlowMod makeArpInputCustomerFlowMessage(); Optional<OFFlowMod> makeServer42InputFlowMessage(int server42UpdPortOffset); }
|
@Test public void makeCustomerPortSharedCatchInstallMessage() { IngressFlowModFactory factory = makeFactory(); FlowEndpoint endpoint = factory.getCommand().getEndpoint(); OFFlowMod expected = of.buildFlowAdd() .setTableId(TableId.of(SwitchManager.INPUT_TABLE_ID)) .setPriority(SwitchManager.INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE) .setCookie(U64.of(Cookie.encodeIngressRulePassThrough(endpoint.getPortNumber()))) .setMatch(of.buildMatch().setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())).build()) .setInstructions(Collections.singletonList(of.instructions().gotoTable( TableId.of(SwitchManager.PRE_INGRESS_TABLE_ID)))) .build(); verifyOfMessageEquals(expected, factory.makeCustomerPortSharedCatchMessage()); }
|
public OFFlowMod makeCustomerPortSharedCatchMessage() { FlowEndpoint endpoint = command.getEndpoint(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.INPUT_TABLE_ID)) .setPriority(SwitchManager.INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE) .setCookie(U64.of(Cookie.encodeIngressRulePassThrough(endpoint.getPortNumber()))) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .build()) .setInstructions(makeCustomerPortSharedCatchInstructions()) .build(); }
|
IngressFlowModFactory { public OFFlowMod makeCustomerPortSharedCatchMessage() { FlowEndpoint endpoint = command.getEndpoint(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.INPUT_TABLE_ID)) .setPriority(SwitchManager.INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE) .setCookie(U64.of(Cookie.encodeIngressRulePassThrough(endpoint.getPortNumber()))) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .build()) .setInstructions(makeCustomerPortSharedCatchInstructions()) .build(); } }
|
IngressFlowModFactory { public OFFlowMod makeCustomerPortSharedCatchMessage() { FlowEndpoint endpoint = command.getEndpoint(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.INPUT_TABLE_ID)) .setPriority(SwitchManager.INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE) .setCookie(U64.of(Cookie.encodeIngressRulePassThrough(endpoint.getPortNumber()))) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .build()) .setInstructions(makeCustomerPortSharedCatchInstructions()) .build(); } IngressFlowModFactory(
OfFlowModBuilderFactory flowModBuilderFactory, IngressFlowSegmentBase command, IOFSwitch sw,
Set<SwitchFeature> features); }
|
IngressFlowModFactory { public OFFlowMod makeCustomerPortSharedCatchMessage() { FlowEndpoint endpoint = command.getEndpoint(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.INPUT_TABLE_ID)) .setPriority(SwitchManager.INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE) .setCookie(U64.of(Cookie.encodeIngressRulePassThrough(endpoint.getPortNumber()))) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .build()) .setInstructions(makeCustomerPortSharedCatchInstructions()) .build(); } IngressFlowModFactory(
OfFlowModBuilderFactory flowModBuilderFactory, IngressFlowSegmentBase command, IOFSwitch sw,
Set<SwitchFeature> features); OFFlowMod makeOuterOnlyVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeSingleVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeDoubleVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeDefaultPortForwardMessage(MeterId effectiveMeterId); OFFlowMod makeOuterVlanMatchSharedMessage(); OFFlowMod makeOuterOnlyVlanServer42IngressFlowMessage(int server42UpdPortOffset); OFFlowMod makeDefaultPortServer42IngressFlowMessage(int server42UpdPortOffset); OFFlowMod makeCustomerPortSharedCatchMessage(); OFFlowMod makeLldpInputCustomerFlowMessage(); OFFlowMod makeArpInputCustomerFlowMessage(); Optional<OFFlowMod> makeServer42InputFlowMessage(int server42UpdPortOffset); }
|
IngressFlowModFactory { public OFFlowMod makeCustomerPortSharedCatchMessage() { FlowEndpoint endpoint = command.getEndpoint(); return flowModBuilderFactory.makeBuilder(of, TableId.of(SwitchManager.INPUT_TABLE_ID)) .setPriority(SwitchManager.INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE) .setCookie(U64.of(Cookie.encodeIngressRulePassThrough(endpoint.getPortNumber()))) .setMatch(of.buildMatch() .setExact(MatchField.IN_PORT, OFPort.of(endpoint.getPortNumber())) .build()) .setInstructions(makeCustomerPortSharedCatchInstructions()) .build(); } IngressFlowModFactory(
OfFlowModBuilderFactory flowModBuilderFactory, IngressFlowSegmentBase command, IOFSwitch sw,
Set<SwitchFeature> features); OFFlowMod makeOuterOnlyVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeSingleVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeDoubleVlanForwardMessage(MeterId effectiveMeterId); OFFlowMod makeDefaultPortForwardMessage(MeterId effectiveMeterId); OFFlowMod makeOuterVlanMatchSharedMessage(); OFFlowMod makeOuterOnlyVlanServer42IngressFlowMessage(int server42UpdPortOffset); OFFlowMod makeDefaultPortServer42IngressFlowMessage(int server42UpdPortOffset); OFFlowMod makeCustomerPortSharedCatchMessage(); OFFlowMod makeLldpInputCustomerFlowMessage(); OFFlowMod makeArpInputCustomerFlowMessage(); Optional<OFFlowMod> makeServer42InputFlowMessage(int server42UpdPortOffset); }
|
@Test public void extractEthernetPayloadTest() { short vlan1 = 1234; short vlan2 = 2345; short vlan3 = 4000; byte[] originPayload = new byte[]{0x55, (byte) 0xAA}; Ethernet ethernet = buildEthernet( new byte[]{ 0x01, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x01, 0x0B, 0x0B, 0x0B, 0x0B, 0x0B }, ethTypeToByteArray(EthType.Q_IN_Q), shortToByteArray(vlan1), ethTypeToByteArray(EthType.BRIDGING), shortToByteArray(vlan2), ethTypeToByteArray(EthType.VLAN_FRAME), shortToByteArray(vlan3), ethTypeToByteArray(EthType.IPv4), originPayload); List<Integer> vlans = new ArrayList<>(); IPacket payload = EthernetPacketToolbox.extractPayload(ethernet, vlans); assertEquals(3, vlans.size()); assertEquals(Integer.valueOf(vlan1), vlans.get(0)); assertEquals(Integer.valueOf(vlan2), vlans.get(1)); assertEquals(Integer.valueOf(vlan3), vlans.get(2)); assertArrayEquals(originPayload, payload.serialize()); }
|
public static IPacket extractPayload(Ethernet packet, List<Integer> vlanStack) { short rootVlan = packet.getVlanID(); if (0 < rootVlan) { vlanStack.add((int) rootVlan); } IPacket payload = packet.getPayload(); while (payload instanceof VlanTag) { short vlanId = ((VlanTag) payload).getVlanId(); vlanStack.add((int) vlanId); payload = payload.getPayload(); } return payload; }
|
EthernetPacketToolbox { public static IPacket extractPayload(Ethernet packet, List<Integer> vlanStack) { short rootVlan = packet.getVlanID(); if (0 < rootVlan) { vlanStack.add((int) rootVlan); } IPacket payload = packet.getPayload(); while (payload instanceof VlanTag) { short vlanId = ((VlanTag) payload).getVlanId(); vlanStack.add((int) vlanId); payload = payload.getPayload(); } return payload; } }
|
EthernetPacketToolbox { public static IPacket extractPayload(Ethernet packet, List<Integer> vlanStack) { short rootVlan = packet.getVlanID(); if (0 < rootVlan) { vlanStack.add((int) rootVlan); } IPacket payload = packet.getPayload(); while (payload instanceof VlanTag) { short vlanId = ((VlanTag) payload).getVlanId(); vlanStack.add((int) vlanId); payload = payload.getPayload(); } return payload; } private EthernetPacketToolbox(); }
|
EthernetPacketToolbox { public static IPacket extractPayload(Ethernet packet, List<Integer> vlanStack) { short rootVlan = packet.getVlanID(); if (0 < rootVlan) { vlanStack.add((int) rootVlan); } IPacket payload = packet.getPayload(); while (payload instanceof VlanTag) { short vlanId = ((VlanTag) payload).getVlanId(); vlanStack.add((int) vlanId); payload = payload.getPayload(); } return payload; } private EthernetPacketToolbox(); static IPacket injectVlan(IPacket payload, int vlanId, EthType type); static IPacket extractPayload(Ethernet packet, List<Integer> vlanStack); }
|
EthernetPacketToolbox { public static IPacket extractPayload(Ethernet packet, List<Integer> vlanStack) { short rootVlan = packet.getVlanID(); if (0 < rootVlan) { vlanStack.add((int) rootVlan); } IPacket payload = packet.getPayload(); while (payload instanceof VlanTag) { short vlanId = ((VlanTag) payload).getVlanId(); vlanStack.add((int) vlanId); payload = payload.getPayload(); } return payload; } private EthernetPacketToolbox(); static IPacket injectVlan(IPacket payload, int vlanId, EthType type); static IPacket extractPayload(Ethernet packet, List<Integer> vlanStack); }
|
@Test(expected = IllegalSwitchPropertiesException.class) public void shouldValidateFlowWithArpFlagWhenUpdatingSwitchProperties() { Switch firstSwitch = Switch.builder().switchId(TEST_SWITCH_ID).status(SwitchStatus.ACTIVE).build(); Switch secondSwitch = Switch.builder().switchId(TEST_SWITCH_ID_2).status(SwitchStatus.ACTIVE).build(); switchRepository.add(firstSwitch); switchRepository.add(secondSwitch); Flow flow = Flow.builder() .flowId(TEST_FLOW_ID_1) .srcSwitch(firstSwitch) .destSwitch(secondSwitch) .detectConnectedDevices( new DetectConnectedDevices(false, true, false, true, false, false, false, false)) .build(); flowRepository.add(flow); createSwitchProperties(firstSwitch, Collections.singleton(FlowEncapsulationType.TRANSIT_VLAN), true, false, false); SwitchPropertiesDto update = new SwitchPropertiesDto(); update.setSupportedTransitEncapsulation( Collections.singleton(org.openkilda.messaging.payload.flow.FlowEncapsulationType.TRANSIT_VLAN)); update.setMultiTable(false); update.setSwitchArp(false); switchOperationsService.updateSwitchProperties(TEST_SWITCH_ID, update); }
|
public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; } }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices(
SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices(
SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
|
@Test public void inaccurateSetFieldVlanVidActionNegative() { OfFlowPresenceVerifier presenceVerifier = testInaccurateSetFieldVlanVidAction(Collections.emptySet()); Assert.assertFalse(presenceVerifier.getMissing().isEmpty()); }
|
public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); }
|
OfFlowPresenceVerifier { public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); } }
|
OfFlowPresenceVerifier { public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); } OfFlowPresenceVerifier(
IOfFlowDumpProducer dumpProducer, List<OFFlowMod> expectedFlows, Set<SwitchFeature> switchFeatures); }
|
OfFlowPresenceVerifier { public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); } OfFlowPresenceVerifier(
IOfFlowDumpProducer dumpProducer, List<OFFlowMod> expectedFlows, Set<SwitchFeature> switchFeatures); List<OFFlowMod> getMissing(); }
|
OfFlowPresenceVerifier { public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); } OfFlowPresenceVerifier(
IOfFlowDumpProducer dumpProducer, List<OFFlowMod> expectedFlows, Set<SwitchFeature> switchFeatures); List<OFFlowMod> getMissing(); }
|
@Test public void inaccurateSetFieldVlanVidActionPositive() { OfFlowPresenceVerifier presenceVerifier = testInaccurateSetFieldVlanVidAction(Collections.singleton( SwitchFeature.INACCURATE_SET_VLAN_VID_ACTION)); Assert.assertTrue(presenceVerifier.getMissing().isEmpty()); }
|
public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); }
|
OfFlowPresenceVerifier { public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); } }
|
OfFlowPresenceVerifier { public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); } OfFlowPresenceVerifier(
IOfFlowDumpProducer dumpProducer, List<OFFlowMod> expectedFlows, Set<SwitchFeature> switchFeatures); }
|
OfFlowPresenceVerifier { public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); } OfFlowPresenceVerifier(
IOfFlowDumpProducer dumpProducer, List<OFFlowMod> expectedFlows, Set<SwitchFeature> switchFeatures); List<OFFlowMod> getMissing(); }
|
OfFlowPresenceVerifier { public List<OFFlowMod> getMissing() { return expectedOfFlows.values().stream() .flatMap(Collection::stream) .collect(Collectors.toList()); } OfFlowPresenceVerifier(
IOfFlowDumpProducer dumpProducer, List<OFFlowMod> expectedFlows, Set<SwitchFeature> switchFeatures); List<OFFlowMod> getMissing(); }
|
@Ignore("Fix applying aspects to test classes") @Test @NewCorrelationContextRequired public void shouldInitializeCorrelationId() { String correlationId = CorrelationContext.getId(); assertNotEquals(DEFAULT_CORRELATION_ID, correlationId); }
|
public static String getId() { return Optional.ofNullable(ID.get()).orElse(DEFAULT_CORRELATION_ID); }
|
CorrelationContext { public static String getId() { return Optional.ofNullable(ID.get()).orElse(DEFAULT_CORRELATION_ID); } }
|
CorrelationContext { public static String getId() { return Optional.ofNullable(ID.get()).orElse(DEFAULT_CORRELATION_ID); } private CorrelationContext(); }
|
CorrelationContext { public static String getId() { return Optional.ofNullable(ID.get()).orElse(DEFAULT_CORRELATION_ID); } private CorrelationContext(); static String getId(); static CorrelationContextClosable create(String correlationId); }
|
CorrelationContext { public static String getId() { return Optional.ofNullable(ID.get()).orElse(DEFAULT_CORRELATION_ID); } private CorrelationContext(); static String getId(); static CorrelationContextClosable create(String correlationId); }
|
@Test public void testIsNoviSwitch() { assertTrue(isNoviSwitch(makeSwitchMock("NoviFlow Inc", "NW400.4.0", "NS21100"))); assertTrue(isNoviSwitch(makeSwitchMock("NoviFlow Inc", "NW500.2.0_dev", "NS21100"))); assertTrue(isNoviSwitch(makeSwitchMock("NoviFlow Inc", "NW500.0.1", "WB5164-E"))); assertTrue(isNoviSwitch(makeSwitchMock("NoviFlow Inc", "NW500.0.1", "SM5000-SM"))); assertTrue(isNoviSwitch(makeSwitchMock("E", "NW400.4.0", "WB5164"))); assertFalse(isNoviSwitch(makeSwitchMock("Common Inc", "Soft123", "Hard123"))); assertFalse(isNoviSwitch(makeSwitchMock("Nicira, Inc.", "Soft123", "Hard123"))); assertFalse(isNoviSwitch(makeSwitchMock("2004-2016 Centec Networks Inc", "2.8.16.21", "48T"))); assertFalse(isNoviSwitch(makeSwitchMock("Sonus Networks Inc, 4 Technology Park Dr, Westford, MA 01886, USA", "8.1.0.14", "VX3048"))); }
|
static boolean isNoviSwitch(IOFSwitch sw) { if (sw == null || sw.getSwitchDescription() == null) { return false; } String manufacturer = getManufacturer(sw); if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(manufacturer)) { return true; } SwitchDescription description = sw.getSwitchDescription(); if (E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( Optional.ofNullable(description.getHardwareDescription()).orElse("")).matches()) { return true; } return manufacturer.toLowerCase().contains(NOVIFLOW_MANUFACTURER_SUFFIX); }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isNoviSwitch(IOFSwitch sw) { if (sw == null || sw.getSwitchDescription() == null) { return false; } String manufacturer = getManufacturer(sw); if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(manufacturer)) { return true; } SwitchDescription description = sw.getSwitchDescription(); if (E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( Optional.ofNullable(description.getHardwareDescription()).orElse("")).matches()) { return true; } return manufacturer.toLowerCase().contains(NOVIFLOW_MANUFACTURER_SUFFIX); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isNoviSwitch(IOFSwitch sw) { if (sw == null || sw.getSwitchDescription() == null) { return false; } String manufacturer = getManufacturer(sw); if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(manufacturer)) { return true; } SwitchDescription description = sw.getSwitchDescription(); if (E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( Optional.ofNullable(description.getHardwareDescription()).orElse("")).matches()) { return true; } return manufacturer.toLowerCase().contains(NOVIFLOW_MANUFACTURER_SUFFIX); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isNoviSwitch(IOFSwitch sw) { if (sw == null || sw.getSwitchDescription() == null) { return false; } String manufacturer = getManufacturer(sw); if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(manufacturer)) { return true; } SwitchDescription description = sw.getSwitchDescription(); if (E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( Optional.ofNullable(description.getHardwareDescription()).orElse("")).matches()) { return true; } return manufacturer.toLowerCase().contains(NOVIFLOW_MANUFACTURER_SUFFIX); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isNoviSwitch(IOFSwitch sw) { if (sw == null || sw.getSwitchDescription() == null) { return false; } String manufacturer = getManufacturer(sw); if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(manufacturer)) { return true; } SwitchDescription description = sw.getSwitchDescription(); if (E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( Optional.ofNullable(description.getHardwareDescription()).orElse("")).matches()) { return true; } return manufacturer.toLowerCase().contains(NOVIFLOW_MANUFACTURER_SUFFIX); } static final String NOVIFLOW_MANUFACTURER_SUFFIX; }
|
@Test public void testIsWbSeries() { assertTrue(isWbSeries(makeSwitchMock("NoviFlow Inc", "NW500.0.1", "WB5164-E"))); assertTrue(isWbSeries(makeSwitchMock("E", "NW400.4.0", "WB5164"))); assertFalse(isWbSeries(makeSwitchMock("NoviFlow Inc", "NW400.4.0", "NS21100"))); assertFalse(isWbSeries(makeSwitchMock("NoviFlow Inc", "NW500.2.0_dev", "NS21100"))); assertFalse(isWbSeries(makeSwitchMock("NoviFlow Inc", "NW500.0.1", "SM5000-SM"))); assertFalse(isWbSeries(makeSwitchMock("Common Inc", "Soft123", "Hard123"))); assertFalse(isWbSeries(makeSwitchMock("Nicira, Inc.", "Soft123", "Hard123"))); assertFalse(isWbSeries(makeSwitchMock("2004-2016 Centec Networks Inc", "2.8.16.21", "48T"))); assertFalse(isWbSeries(makeSwitchMock("Sonus Networks Inc, 4 Technology Park Dr, Westford, MA 01886, USA", "8.1.0.14", "VX3048"))); }
|
static boolean isWbSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(getManufacturer(sw))) { return true; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description .map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isWbSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(getManufacturer(sw))) { return true; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description .map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isWbSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(getManufacturer(sw))) { return true; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description .map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isWbSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(getManufacturer(sw))) { return true; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description .map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isWbSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } if (E_SWITCH_MANUFACTURER_DESCRIPTION.equalsIgnoreCase(getManufacturer(sw))) { return true; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return E_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description .map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); } static final String NOVIFLOW_MANUFACTURER_SUFFIX; }
|
@Test public void testIsSmSeries() { assertTrue(isSmSeries(makeSwitchMock("NoviFlow Inc", "NW500.0.1", "SM5000-SM"))); assertFalse(isSmSeries(makeSwitchMock("NoviFlow Inc", "NW500.0.1", "WB5164-E"))); assertFalse(isSmSeries(makeSwitchMock("E", "NW400.4.0", "WB5164"))); assertFalse(isSmSeries(makeSwitchMock("NoviFlow Inc", "NW400.4.0", "NS21100"))); assertFalse(isSmSeries(makeSwitchMock("NoviFlow Inc", "NW500.2.0_dev", "NS21100"))); assertFalse(isSmSeries(makeSwitchMock("Common Inc", "Soft123", "Hard123"))); assertFalse(isSmSeries(makeSwitchMock("Nicira, Inc.", "Soft123", "Hard123"))); assertFalse(isSmSeries(makeSwitchMock("2004-2016 Centec Networks Inc", "2.8.16.21", "48T"))); assertFalse(isSmSeries(makeSwitchMock("Sonus Networks Inc, 4 Technology Park Dr, Westford, MA 01886, USA", "8.1.0.14", "VX3048"))); }
|
static boolean isSmSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return NOVIFLOW_VIRTUAL_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description.map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isSmSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return NOVIFLOW_VIRTUAL_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description.map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isSmSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return NOVIFLOW_VIRTUAL_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description.map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isSmSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return NOVIFLOW_VIRTUAL_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description.map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); } }
|
NoviflowSpecificFeature extends AbstractFeature { static boolean isSmSeries(IOFSwitch sw) { if (! isNoviSwitch(sw)) { return false; } Optional<SwitchDescription> description = Optional.ofNullable(sw.getSwitchDescription()); return NOVIFLOW_VIRTUAL_SWITCH_HARDWARE_DESCRIPTION_REGEX.matcher( description.map(SwitchDescription::getHardwareDescription) .orElse("")).matches(); } static final String NOVIFLOW_MANUFACTURER_SUFFIX; }
|
@Test public void testDiscoverOfSwitchWithoutBfdSupport() { Assert.assertFalse(bfdFeature.discover(createSwitchWithDescription(null)).isPresent()); assertWithoutBfdSupport("2.8.16.21"); assertWithoutBfdSupport("2.8.16.15"); assertWithoutBfdSupport("8.1.0.14"); }
|
@Override public Optional<SwitchFeature> discover(IOFSwitch sw) { Optional<SwitchFeature> empty = Optional.empty(); SwitchDescription description = sw.getSwitchDescription(); if (description == null || description.getSoftwareDescription() == null) { return empty; } if (!NOVIFLOW_SOFTWARE_DESCRIPTION_REGEX.matcher(description.getSoftwareDescription()).matches()) { return empty; } return Optional.of(SwitchFeature.BFD); }
|
BfdFeature extends AbstractFeature { @Override public Optional<SwitchFeature> discover(IOFSwitch sw) { Optional<SwitchFeature> empty = Optional.empty(); SwitchDescription description = sw.getSwitchDescription(); if (description == null || description.getSoftwareDescription() == null) { return empty; } if (!NOVIFLOW_SOFTWARE_DESCRIPTION_REGEX.matcher(description.getSoftwareDescription()).matches()) { return empty; } return Optional.of(SwitchFeature.BFD); } }
|
BfdFeature extends AbstractFeature { @Override public Optional<SwitchFeature> discover(IOFSwitch sw) { Optional<SwitchFeature> empty = Optional.empty(); SwitchDescription description = sw.getSwitchDescription(); if (description == null || description.getSoftwareDescription() == null) { return empty; } if (!NOVIFLOW_SOFTWARE_DESCRIPTION_REGEX.matcher(description.getSoftwareDescription()).matches()) { return empty; } return Optional.of(SwitchFeature.BFD); } }
|
BfdFeature extends AbstractFeature { @Override public Optional<SwitchFeature> discover(IOFSwitch sw) { Optional<SwitchFeature> empty = Optional.empty(); SwitchDescription description = sw.getSwitchDescription(); if (description == null || description.getSoftwareDescription() == null) { return empty; } if (!NOVIFLOW_SOFTWARE_DESCRIPTION_REGEX.matcher(description.getSoftwareDescription()).matches()) { return empty; } return Optional.of(SwitchFeature.BFD); } @Override Optional<SwitchFeature> discover(IOFSwitch sw); }
|
BfdFeature extends AbstractFeature { @Override public Optional<SwitchFeature> discover(IOFSwitch sw) { Optional<SwitchFeature> empty = Optional.empty(); SwitchDescription description = sw.getSwitchDescription(); if (description == null || description.getSoftwareDescription() == null) { return empty; } if (!NOVIFLOW_SOFTWARE_DESCRIPTION_REGEX.matcher(description.getSoftwareDescription()).matches()) { return empty; } return Optional.of(SwitchFeature.BFD); } @Override Optional<SwitchFeature> discover(IOFSwitch sw); }
|
@Test public void testToPortStatsDataV13() { OFFactoryVer13 ofFactoryVer13 = new OFFactoryVer13(); OFPortStatsEntry ofPortStatsEntry = prebuildPortStatsEntry(ofFactoryVer13.buildPortStatsEntry()) .setRxFrameErr(U64.of(rxFrameErr)) .setRxOverErr(U64.of(rxOverErr)) .setRxCrcErr(U64.of(rxCrcErr)) .setCollisions(U64.of(collisions)).build(); OFPortStatsReply ofPortStatsReply = ofFactoryVer13.buildPortStatsReply() .setXid(xId) .setEntries(Collections.singletonList(ofPortStatsEntry)) .build(); PortStatsData data = OfPortStatsMapper.INSTANCE.toPostStatsData( Collections.singletonList(ofPortStatsReply), switchId); assertPortStatsData(data); }
|
public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } }
|
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } }
|
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } }
|
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId); PortStatsEntry toPortStatsEntry(OFPortStatsEntry entry); }
|
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId); PortStatsEntry toPortStatsEntry(OFPortStatsEntry entry); static final OfPortStatsMapper INSTANCE; }
|
@Test public void testToPortStatsDataV14() { OFFactoryVer14 ofFactoryVer14 = new OFFactoryVer14(); OFPortStatsProp opticalProps = ofFactoryVer14.buildPortStatsPropOptical().setRxPwr(123).build(); OFPortStatsProp ethernetProps = ofFactoryVer14.buildPortStatsPropEthernet() .setRxFrameErr(U64.of(rxFrameErr)) .setRxOverErr(U64.of(rxOverErr)) .setRxCrcErr(U64.of(rxCrcErr)) .setCollisions(U64.of(collisions)) .build(); OFPortStatsEntry ofPortStatsEntry = prebuildPortStatsEntry(ofFactoryVer14.buildPortStatsEntry()) .setProperties(Lists.newArrayList(opticalProps, ethernetProps)) .build(); OFPortStatsReply ofPortStatsReply = ofFactoryVer14.buildPortStatsReply() .setXid(xId) .setEntries(Collections.singletonList(ofPortStatsEntry)) .build(); PortStatsData data = OfPortStatsMapper.INSTANCE.toPostStatsData( Collections.singletonList(ofPortStatsReply), switchId); assertPortStatsData(data); }
|
public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } }
|
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } }
|
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } }
|
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId); PortStatsEntry toPortStatsEntry(OFPortStatsEntry entry); }
|
OfPortStatsMapper { public PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId) { try { List<PortStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toPortStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new PortStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert port stats data %s on switch %s", data, switchId), e); return null; } } PortStatsData toPostStatsData(List<OFPortStatsReply> data, SwitchId switchId); PortStatsEntry toPortStatsEntry(OFPortStatsEntry entry); static final OfPortStatsMapper INSTANCE; }
|
@Test public void testToFlowStatsData() { OFFlowStatsEntry ofEntry = buildFlowStatsEntry(); OFFlowStatsReply ofReply = factory.buildFlowStatsReply() .setXid(xId) .setEntries(Collections.singletonList(ofEntry)) .build(); FlowStatsData data = OfFlowStatsMapper.INSTANCE.toFlowStatsData(Collections.singletonList(ofReply), switchId); assertEquals(switchId, data.getSwitchId()); assertEquals(1, data.getStats().size()); FlowStatsEntry entry = data.getStats().get(0); assertEquals(tableId, entry.getTableId()); assertEquals(cookie, entry.getCookie()); assertEquals(packetCount, entry.getPacketCount()); assertEquals(byteCount, entry.getByteCount()); }
|
public FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId) { try { List<FlowStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toFlowStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new FlowStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert flow stats data %s on switch %s", data, switchId), e); return null; } }
|
OfFlowStatsMapper { public FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId) { try { List<FlowStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toFlowStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new FlowStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert flow stats data %s on switch %s", data, switchId), e); return null; } } }
|
OfFlowStatsMapper { public FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId) { try { List<FlowStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toFlowStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new FlowStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert flow stats data %s on switch %s", data, switchId), e); return null; } } }
|
OfFlowStatsMapper { public FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId) { try { List<FlowStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toFlowStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new FlowStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert flow stats data %s on switch %s", data, switchId), e); return null; } } FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); }
|
OfFlowStatsMapper { public FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId) { try { List<FlowStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toFlowStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new FlowStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert flow stats data %s on switch %s", data, switchId), e); return null; } } FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); static final OfFlowStatsMapper INSTANCE; }
|
@Test public void shouldUpdateServer42SwitchProperties() { Switch sw = Switch.builder() .switchId(TEST_SWITCH_ID) .status(SwitchStatus.ACTIVE) .features(Collections.singleton(SwitchFeature.MULTI_TABLE)) .build(); switchRepository.add(sw); createServer42SwitchProperties(sw, false, SERVER_42_PORT_1, SERVER_42_VLAN_1, SERVER_42_MAC_ADDRESS_1); SwitchPropertiesDto update = new SwitchPropertiesDto(); update.setSupportedTransitEncapsulation( Collections.singleton(org.openkilda.messaging.payload.flow.FlowEncapsulationType.TRANSIT_VLAN)); update.setMultiTable(true); update.setServer42FlowRtt(true); update.setServer42Port(SERVER_42_PORT_2); update.setServer42Vlan(SERVER_42_VLAN_2); update.setServer42MacAddress(SERVER_42_MAC_ADDRESS_2); switchOperationsService.updateSwitchProperties(TEST_SWITCH_ID, update); Optional<SwitchProperties> updated = switchPropertiesRepository.findBySwitchId(TEST_SWITCH_ID); assertTrue(updated.isPresent()); assertTrue(updated.get().isServer42FlowRtt()); assertEquals(SERVER_42_PORT_2, updated.get().getServer42Port()); assertEquals(SERVER_42_VLAN_2, updated.get().getServer42Vlan()); assertEquals(SERVER_42_MAC_ADDRESS_2, updated.get().getServer42MacAddress()); }
|
public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; } }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices(
SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto) { if (isEmpty(switchPropertiesDto.getSupportedTransitEncapsulation())) { throw new IllegalSwitchPropertiesException("Supported transit encapsulations should not be null or empty"); } SwitchProperties update = SwitchPropertiesMapper.INSTANCE.map(switchPropertiesDto); UpdateSwitchPropertiesResult result = transactionManager.doInTransaction(() -> { SwitchProperties switchProperties = switchPropertiesRepository.findBySwitchId(switchId) .orElseThrow(() -> new SwitchPropertiesNotFoundException(switchId)); validateSwitchProperties(switchId, update); final boolean isSwitchSyncNeeded = isSwitchSyncNeeded(switchProperties, update); switchProperties.setMultiTable(update.isMultiTable()); switchProperties.setSwitchLldp(update.isSwitchLldp()); switchProperties.setSwitchArp(update.isSwitchArp()); switchProperties.setSupportedTransitEncapsulation(update.getSupportedTransitEncapsulation()); switchProperties.setServer42FlowRtt(update.isServer42FlowRtt()); switchProperties.setServer42Port(update.getServer42Port()); switchProperties.setServer42Vlan(update.getServer42Vlan()); switchProperties.setServer42MacAddress(update.getServer42MacAddress()); return new UpdateSwitchPropertiesResult( SwitchPropertiesMapper.INSTANCE.map(switchProperties), isSwitchSyncNeeded); }); if (result.isSwitchSyncRequired()) { carrier.requestSwitchSync(switchId); } if (switchPropertiesDto.isServer42FlowRtt()) { carrier.enableServer42FlowRttOnSwitch(switchId); } else { carrier.disableServer42FlowRttOnSwitch(switchId); } return result.switchPropertiesDto; } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices(
SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
|
@Test public void testFlowEntry() { OFFlowStatsEntry ofEntry = buildFlowStatsEntry(); FlowEntry entry = OfFlowStatsMapper.INSTANCE.toFlowEntry(ofEntry); assertEquals(tableId, entry.getTableId()); assertEquals(cookie, entry.getCookie()); assertEquals(packetCount, entry.getPacketCount()); assertEquals(byteCount, entry.getByteCount()); assertEquals(durationSec, entry.getDurationSeconds()); assertEquals(durationNsec, entry.getDurationNanoSeconds()); assertEquals(hardTimeout, entry.getHardTimeout()); assertEquals(idleTimeout, entry.getIdleTimeout()); assertEquals(priority, entry.getPriority()); assertEquals(String.valueOf(vlanVid.getVlan()), entry.getMatch().getVlanVid()); assertEquals(ethType.toString(), entry.getMatch().getEthType()); assertEquals(ethDst.toString(), entry.getMatch().getEthDst()); assertEquals(port.toString(), entry.getMatch().getInPort()); assertEquals(ipProto.toString(), entry.getMatch().getIpProto()); assertEquals(udpSrc.toString(), entry.getMatch().getUdpSrc()); assertEquals(udpDst.toString(), entry.getMatch().getUdpDst()); FlowSetFieldAction flowSetEthSrcAction = new FlowSetFieldAction("eth_src", MAC_ADDRESS_1); FlowSetFieldAction flowSetEthDstAction = new FlowSetFieldAction("eth_dst", MAC_ADDRESS_2); FlowCopyFieldAction flowCopyFieldAction = FlowCopyFieldAction.builder() .bits(String.valueOf(bits)) .srcOffset(String.valueOf(srcOffset)) .dstOffset(String.valueOf(dstOffset)) .srcOxm(String.valueOf(oxmSrcHeader)) .dstOxm(String.valueOf(oxmDstHeader)) .build(); FlowSwapFieldAction flowSwapFieldAction = FlowSwapFieldAction.builder() .bits(String.valueOf(bits)) .srcOffset(String.valueOf(srcOffset)) .dstOffset(String.valueOf(dstOffset)) .srcOxm(String.valueOf(oxmSrcHeader)) .dstOxm(String.valueOf(oxmDstHeader)) .build(); FlowApplyActions applyActions = new FlowApplyActions(port.toString(), Lists.newArrayList(flowSetEthSrcAction, flowSetEthDstAction), ethType.toString(), null, null, null, group.toString(), flowCopyFieldAction, flowSwapFieldAction); 
FlowInstructions instructions = new FlowInstructions(applyActions, null, meterId, goToTable.getValue()); assertEquals(instructions, entry.getInstructions()); }
|
/**
 * Converts an OpenFlow flow-stats entry into the messaging-layer {@code FlowEntry}.
 * Wrapper values (U64 counters, cookie, table id) are unwrapped to primitives;
 * match and instructions are converted through the dedicated sub-mappers.
 */
public FlowEntry toFlowEntry(final OFFlowStatsEntry entry) {
    // Flag set is rendered as an array of enum constant names.
    String[] flagNames = entry.getFlags().stream()
            .map(OFFlowModFlags::name)
            .toArray(String[]::new);
    return FlowEntry.builder()
            .version(entry.getVersion().toString())
            .cookie(entry.getCookie().getValue())
            .tableId(entry.getTableId().getValue())
            .priority(entry.getPriority())
            .idleTimeout(entry.getIdleTimeout())
            .hardTimeout(entry.getHardTimeout())
            .durationSeconds(entry.getDurationSec())
            .durationNanoSeconds(entry.getDurationNsec())
            .packetCount(entry.getPacketCount().getValue())
            .byteCount(entry.getByteCount().getValue())
            .flags(flagNames)
            .match(toFlowMatchField(entry.getMatch()))
            .instructions(toFlowInstructions(entry.getInstructions()))
            .build();
}
|
OfFlowStatsMapper { public FlowEntry toFlowEntry(final OFFlowStatsEntry entry) { return FlowEntry.builder() .version(entry.getVersion().toString()) .durationSeconds(entry.getDurationSec()) .durationNanoSeconds(entry.getDurationNsec()) .hardTimeout(entry.getHardTimeout()) .idleTimeout(entry.getIdleTimeout()) .priority(entry.getPriority()) .byteCount(entry.getByteCount().getValue()) .packetCount(entry.getPacketCount().getValue()) .flags(entry.getFlags().stream() .map(OFFlowModFlags::name) .toArray(String[]::new)) .cookie(entry.getCookie().getValue()) .tableId(entry.getTableId().getValue()) .match(toFlowMatchField(entry.getMatch())) .instructions(toFlowInstructions(entry.getInstructions())) .build(); } }
|
OfFlowStatsMapper { public FlowEntry toFlowEntry(final OFFlowStatsEntry entry) { return FlowEntry.builder() .version(entry.getVersion().toString()) .durationSeconds(entry.getDurationSec()) .durationNanoSeconds(entry.getDurationNsec()) .hardTimeout(entry.getHardTimeout()) .idleTimeout(entry.getIdleTimeout()) .priority(entry.getPriority()) .byteCount(entry.getByteCount().getValue()) .packetCount(entry.getPacketCount().getValue()) .flags(entry.getFlags().stream() .map(OFFlowModFlags::name) .toArray(String[]::new)) .cookie(entry.getCookie().getValue()) .tableId(entry.getTableId().getValue()) .match(toFlowMatchField(entry.getMatch())) .instructions(toFlowInstructions(entry.getInstructions())) .build(); } }
|
OfFlowStatsMapper { public FlowEntry toFlowEntry(final OFFlowStatsEntry entry) { return FlowEntry.builder() .version(entry.getVersion().toString()) .durationSeconds(entry.getDurationSec()) .durationNanoSeconds(entry.getDurationNsec()) .hardTimeout(entry.getHardTimeout()) .idleTimeout(entry.getIdleTimeout()) .priority(entry.getPriority()) .byteCount(entry.getByteCount().getValue()) .packetCount(entry.getPacketCount().getValue()) .flags(entry.getFlags().stream() .map(OFFlowModFlags::name) .toArray(String[]::new)) .cookie(entry.getCookie().getValue()) .tableId(entry.getTableId().getValue()) .match(toFlowMatchField(entry.getMatch())) .instructions(toFlowInstructions(entry.getInstructions())) .build(); } FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); }
|
OfFlowStatsMapper { public FlowEntry toFlowEntry(final OFFlowStatsEntry entry) { return FlowEntry.builder() .version(entry.getVersion().toString()) .durationSeconds(entry.getDurationSec()) .durationNanoSeconds(entry.getDurationNsec()) .hardTimeout(entry.getHardTimeout()) .idleTimeout(entry.getIdleTimeout()) .priority(entry.getPriority()) .byteCount(entry.getByteCount().getValue()) .packetCount(entry.getPacketCount().getValue()) .flags(entry.getFlags().stream() .map(OFFlowModFlags::name) .toArray(String[]::new)) .cookie(entry.getCookie().getValue()) .tableId(entry.getTableId().getValue()) .match(toFlowMatchField(entry.getMatch())) .instructions(toFlowInstructions(entry.getInstructions())) .build(); } FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); static final OfFlowStatsMapper INSTANCE; }
|
/**
 * Verifies group-desc stats conversion: group id, group type and the
 * apply-actions of both buckets survive the mapping.
 */
@Test
public void testFlowGroupEntry() {
    OFGroupDescStatsEntry entry = buildFlowGroupEntry();
    GroupEntry result = OfFlowStatsMapper.INSTANCE.toFlowGroupEntry(entry);
    assertEquals(entry.getGroup().getGroupNumber(), result.getGroupId());
    assertEquals(entry.getGroupType().toString(), result.getGroupType());
    assertEquals(entry.getBuckets().size(), result.getBuckets().size());
    // First bucket of the fixture: a plain output action to port 12.
    GroupBucket firstBucket = result.getBuckets().get(0);
    assertEquals("12", firstBucket.getApplyActions().getFlowOutput());
    // Second bucket: push a VLAN header, set vlan_vid=12, then output to port 1.
    GroupBucket secondBucket = result.getBuckets().get(1);
    assertEquals(EthType.VLAN_FRAME.toString(), secondBucket.getApplyActions().getPushVlan());
    assertEquals("vlan_vid", secondBucket.getApplyActions().getSetFieldActions().get(0).getFieldName());
    assertEquals("12", secondBucket.getApplyActions().getSetFieldActions().get(0).getFieldValue());
    assertEquals("1", secondBucket.getApplyActions().getFlowOutput());
}
|
/**
 * Converts an OpenFlow group description entry into a {@code GroupEntry}.
 * Each OF bucket is converted via {@code toGroupBucket}.
 *
 * @return the converted entry, or {@code null} when the input is {@code null}
 */
public GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry) {
    if (ofGroupDescStatsEntry == null) {
        return null;
    }
    List<GroupBucket> convertedBuckets = ofGroupDescStatsEntry.getBuckets().stream()
            .map(this::toGroupBucket)
            .collect(toList());
    return GroupEntry.builder()
            .groupId(ofGroupDescStatsEntry.getGroup().getGroupNumber())
            .groupType(ofGroupDescStatsEntry.getGroupType().toString())
            .buckets(convertedBuckets)
            .build();
}
|
OfFlowStatsMapper { public GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry) { if (ofGroupDescStatsEntry == null) { return null; } return GroupEntry.builder() .groupType(ofGroupDescStatsEntry.getGroupType().toString()) .groupId(ofGroupDescStatsEntry.getGroup().getGroupNumber()) .buckets(ofGroupDescStatsEntry.getBuckets().stream() .map(this::toGroupBucket) .collect(toList())) .build(); } }
|
OfFlowStatsMapper { public GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry) { if (ofGroupDescStatsEntry == null) { return null; } return GroupEntry.builder() .groupType(ofGroupDescStatsEntry.getGroupType().toString()) .groupId(ofGroupDescStatsEntry.getGroup().getGroupNumber()) .buckets(ofGroupDescStatsEntry.getBuckets().stream() .map(this::toGroupBucket) .collect(toList())) .build(); } }
|
OfFlowStatsMapper { public GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry) { if (ofGroupDescStatsEntry == null) { return null; } return GroupEntry.builder() .groupType(ofGroupDescStatsEntry.getGroupType().toString()) .groupId(ofGroupDescStatsEntry.getGroup().getGroupNumber()) .buckets(ofGroupDescStatsEntry.getBuckets().stream() .map(this::toGroupBucket) .collect(toList())) .build(); } FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); }
|
OfFlowStatsMapper { public GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry) { if (ofGroupDescStatsEntry == null) { return null; } return GroupEntry.builder() .groupType(ofGroupDescStatsEntry.getGroupType().toString()) .groupId(ofGroupDescStatsEntry.getGroup().getGroupNumber()) .buckets(ofGroupDescStatsEntry.getBuckets().stream() .map(this::toGroupBucket) .collect(toList())) .build(); } FlowEntry toFlowEntry(final OFFlowStatsEntry entry); FlowEntry toFlowEntry(final OFFlowMod entry); FlowMatchField toFlowMatchField(final Match match); FlowInstructions toFlowInstructions(final List<OFInstruction> instructions); GroupEntry toFlowGroupEntry(OFGroupDescStatsEntry ofGroupDescStatsEntry); GroupBucket toGroupBucket(OFBucket ofBucket); FlowApplyActions toFlowApplyActions(List<OFAction> ofApplyActions); FlowStatsData toFlowStatsData(List<OFFlowStatsReply> data, SwitchId switchId); FlowStatsEntry toFlowStatsEntry(OFFlowStatsEntry entry); static final OfFlowStatsMapper INSTANCE; }
|
/**
 * Verifies table-stats mapping: table id, active count and the U64 lookup/matched
 * counters are unwrapped into plain numeric fields.
 */
@Test
public void shouldConvertSuccessfully() {
    OFFactoryVer13 ofFactoryVer13 = new OFFactoryVer13();
    OFTableStatsEntry entry = ofFactoryVer13.buildTableStatsEntry()
            .setTableId(TableId.of(11))
            .setActiveCount(10)
            .setMatchedCount(U64.of(100001L))
            .setLookupCount(U64.of(100002L))
            .build();
    TableStatsEntry result = OfTableStatsMapper.INSTANCE.toTableStatsEntry(entry);
    assertEquals(result.getTableId(), entry.getTableId().getValue());
    assertEquals(result.getActiveEntries(), entry.getActiveCount());
    assertEquals(result.getLookupCount(), entry.getLookupCount().getValue());
    assertEquals(result.getMatchedCount(), entry.getMatchedCount().getValue());
}
|
/**
 * MapStruct mapping for a single OpenFlow table-stats entry: unwraps the
 * {@code TableId} and {@code U64} wrapper objects ({@code .value}) into the
 * plain numeric fields of {@code TableStatsEntry}, and renames
 * {@code activeCount} to {@code activeEntries}.
 */
@Mapping(source = "tableId.value", target = "tableId")
@Mapping(source = "activeCount", target = "activeEntries")
@Mapping(source = "lookupCount.value", target = "lookupCount")
@Mapping(source = "matchedCount.value", target = "matchedCount")
public abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source);
|
OfTableStatsMapper { @Mapping(source = "tableId.value", target = "tableId") @Mapping(source = "activeCount", target = "activeEntries") @Mapping(source = "lookupCount.value", target = "lookupCount") @Mapping(source = "matchedCount.value", target = "matchedCount") public abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source); }
|
OfTableStatsMapper { @Mapping(source = "tableId.value", target = "tableId") @Mapping(source = "activeCount", target = "activeEntries") @Mapping(source = "lookupCount.value", target = "lookupCount") @Mapping(source = "matchedCount.value", target = "matchedCount") public abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source); }
|
OfTableStatsMapper { @Mapping(source = "tableId.value", target = "tableId") @Mapping(source = "activeCount", target = "activeEntries") @Mapping(source = "lookupCount.value", target = "lookupCount") @Mapping(source = "matchedCount.value", target = "matchedCount") public abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source); @Mapping(source = "tableId.value", target = "tableId") @Mapping(source = "activeCount", target = "activeEntries") @Mapping(source = "lookupCount.value", target = "lookupCount") @Mapping(source = "matchedCount.value", target = "matchedCount") abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source); }
|
OfTableStatsMapper { @Mapping(source = "tableId.value", target = "tableId") @Mapping(source = "activeCount", target = "activeEntries") @Mapping(source = "lookupCount.value", target = "lookupCount") @Mapping(source = "matchedCount.value", target = "matchedCount") public abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source); @Mapping(source = "tableId.value", target = "tableId") @Mapping(source = "activeCount", target = "activeEntries") @Mapping(source = "lookupCount.value", target = "lookupCount") @Mapping(source = "matchedCount.value", target = "matchedCount") abstract TableStatsEntry toTableStatsEntry(OFTableStatsEntry source); static final OfTableStatsMapper INSTANCE; }
|
/**
 * Ports in the reserved OpenFlow range (numbers at or above {@code OFPort.MAX},
 * i.e. 0xffffff00..0xffffffff) must be reported as reserved; ordinary
 * data-plane port numbers must not.
 */
@Test
public void testReservedPortCheck() {
    // All logical/reserved ports defined by the OpenFlow spec.
    for (OFPort port : new OFPort[]{
            OFPort.LOCAL, OFPort.ALL, OFPort.CONTROLLER, OFPort.ANY, OFPort.FLOOD,
            OFPort.NO_MASK, OFPort.IN_PORT, OFPort.NORMAL, OFPort.TABLE}) {
        Assert.assertTrue(String.format("Port %s must be detected as RESERVED, but it's not", port),
                OfPortDescConverter.INSTANCE.isReservedPort(port));
    }
    // Boundary check: port 1 and the highest non-reserved number (MAX - 1).
    for (OFPort port : new OFPort[]{
            OFPort.of(1), OFPort.of(OFPort.MAX.getPortNumber() - 1)}) {
        Assert.assertFalse(String.format("Port %s must be detected as NOT RESERVED, but it's not", port),
                OfPortDescConverter.INSTANCE.isReservedPort(port));
    }
}
|
/**
 * Tells whether the port number falls into the reserved OpenFlow range
 * ({@code OFPort.MAX} = 0xffffff00 up to 0xffffffff).
 *
 * <p>Those unsigned values map onto the signed 32-bit interval [-256, -1],
 * so the check is expressed with signed comparisons.
 */
public boolean isReservedPort(OFPort port) {
    int portNumber = port.getPortNumber();
    return portNumber <= -1 && OFPort.MAX.getPortNumber() <= portNumber;
}
|
OfPortDescConverter { public boolean isReservedPort(OFPort port) { return OFPort.MAX.getPortNumber() <= port.getPortNumber() && port.getPortNumber() <= -1; } }
|
OfPortDescConverter { public boolean isReservedPort(OFPort port) { return OFPort.MAX.getPortNumber() <= port.getPortNumber() && port.getPortNumber() <= -1; } }
|
OfPortDescConverter { public boolean isReservedPort(OFPort port) { return OFPort.MAX.getPortNumber() <= port.getPortNumber() && port.getPortNumber() <= -1; } PortDescription toPortDescription(OFPortDesc ofPortDesc); PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc,
net.floodlightcontroller.core.PortChangeType type); boolean isReservedPort(OFPort port); boolean isPortEnabled(OFPortDesc portDesc); }
|
OfPortDescConverter { public boolean isReservedPort(OFPort port) { return OFPort.MAX.getPortNumber() <= port.getPortNumber() && port.getPortNumber() <= -1; } PortDescription toPortDescription(OFPortDesc ofPortDesc); PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc,
net.floodlightcontroller.core.PortChangeType type); boolean isReservedPort(OFPort port); boolean isPortEnabled(OFPortDesc portDesc); static final OfPortDescConverter INSTANCE; }
|
/**
 * Every floodlight {@code PortChangeType} must be translated into the
 * corresponding messaging-layer {@code PortChangeType} by toPortInfoData.
 */
@Test
public void testPortChangeTypeMapping() {
    OFPortDesc portDesc = OFFactoryVer13.INSTANCE.buildPortDesc()
            .setPortNo(OFPort.of(1))
            .setName("test")
            .build();
    // Expected correspondence: messaging enum value -> floodlight enum value.
    Map<org.openkilda.messaging.info.event.PortChangeType,
            net.floodlightcontroller.core.PortChangeType> expected = new HashMap<>();
    expected.put(org.openkilda.messaging.info.event.PortChangeType.ADD,
            net.floodlightcontroller.core.PortChangeType.ADD);
    expected.put(org.openkilda.messaging.info.event.PortChangeType.OTHER_UPDATE,
            net.floodlightcontroller.core.PortChangeType.OTHER_UPDATE);
    expected.put(org.openkilda.messaging.info.event.PortChangeType.DELETE,
            net.floodlightcontroller.core.PortChangeType.DELETE);
    expected.put(org.openkilda.messaging.info.event.PortChangeType.UP,
            net.floodlightcontroller.core.PortChangeType.UP);
    expected.put(org.openkilda.messaging.info.event.PortChangeType.DOWN,
            net.floodlightcontroller.core.PortChangeType.DOWN);
    DatapathId dpId = DatapathId.of(1);
    for (Map.Entry<org.openkilda.messaging.info.event.PortChangeType,
            net.floodlightcontroller.core.PortChangeType> entry : expected.entrySet()) {
        PortInfoData encoded = OfPortDescConverter.INSTANCE.toPortInfoData(dpId, portDesc, entry.getValue());
        // Enum values are singletons, so identity comparison is intentional here.
        Assert.assertSame(entry.getKey(), encoded.getState());
    }
}
|
/**
 * Builds a northbound {@code PortInfoData} event from a floodlight port
 * description and the observed change type.
 */
public PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc,
                                   net.floodlightcontroller.core.PortChangeType type) {
    SwitchId switchId = new SwitchId(dpId.getLong());
    int portNumber = portDesc.getPortNo().getPortNumber();
    return new PortInfoData(switchId, portNumber, mapChangeType(type), isPortEnabled(portDesc));
}
|
OfPortDescConverter { public PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc, net.floodlightcontroller.core.PortChangeType type) { return new PortInfoData( new SwitchId(dpId.getLong()), portDesc.getPortNo().getPortNumber(), mapChangeType(type), isPortEnabled(portDesc)); } }
|
OfPortDescConverter { public PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc, net.floodlightcontroller.core.PortChangeType type) { return new PortInfoData( new SwitchId(dpId.getLong()), portDesc.getPortNo().getPortNumber(), mapChangeType(type), isPortEnabled(portDesc)); } }
|
OfPortDescConverter { public PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc, net.floodlightcontroller.core.PortChangeType type) { return new PortInfoData( new SwitchId(dpId.getLong()), portDesc.getPortNo().getPortNumber(), mapChangeType(type), isPortEnabled(portDesc)); } PortDescription toPortDescription(OFPortDesc ofPortDesc); PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc,
net.floodlightcontroller.core.PortChangeType type); boolean isReservedPort(OFPort port); boolean isPortEnabled(OFPortDesc portDesc); }
|
OfPortDescConverter { public PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc, net.floodlightcontroller.core.PortChangeType type) { return new PortInfoData( new SwitchId(dpId.getLong()), portDesc.getPortNo().getPortNumber(), mapChangeType(type), isPortEnabled(portDesc)); } PortDescription toPortDescription(OFPortDesc ofPortDesc); PortInfoData toPortInfoData(DatapathId dpId, OFPortDesc portDesc,
net.floodlightcontroller.core.PortChangeType type); boolean isReservedPort(OFPort port); boolean isPortEnabled(OFPortDesc portDesc); static final OfPortDescConverter INSTANCE; }
|
/**
 * One meter-stats reply with a single band must yield one {@code MeterStatsEntry}
 * carrying the band-level counters (not the meter-level packet/byte-in counts).
 */
@Test
public void testToPortStatsDataV13() {
    OFFactoryVer13 factory = new OFFactoryVer13();
    OFMeterBandStats bandStats = factory.meterBandStats(U64.of(bandPacketCount), U64.of(bandByteCount));
    OFMeterStats meterStats = factory.buildMeterStats()
            .setMeterId(meterId)
            .setByteInCount(U64.of(meterByteCount))
            .setPacketInCount(U64.of(meterPacketCount))
            .setBandStats(Collections.singletonList(bandStats))
            .build();
    OFMeterStatsReply reply = factory.buildMeterStatsReply()
            .setEntries(Collections.singletonList(meterStats))
            .build();
    MeterStatsData data = OfMeterStatsMapper.INSTANCE.toMeterStatsData(Collections.singletonList(reply), switchId);
    assertEquals(switchId, data.getSwitchId());
    assertEquals(1, data.getStats().size());
    MeterStatsEntry statsEntry = data.getStats().get(0);
    // The mapper exposes the band counters, discarding the meter-level ones.
    assertEquals(bandByteCount, statsEntry.getByteInCount());
    assertEquals(bandPacketCount, statsEntry.getPacketsInCount());
}
|
/**
 * Aggregates a list of meter-stats replies into one {@code MeterStatsData}
 * message for the given switch. Entries that convert to {@code null} are dropped.
 *
 * @return the aggregated message, or {@code null} when the reply data is
 *         malformed (NPE / unsupported operation / illegal argument while converting)
 */
public MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId) {
    try {
        List<MeterStatsEntry> converted = data.stream()
                .map(OFMeterStatsReply::getEntries)
                .flatMap(List::stream)
                .map(this::toMeterStatsEntry)
                .filter(Objects::nonNull)
                .collect(toList());
        return new MeterStatsData(switchId, converted);
    } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) {
        log.error(String.format("Could not convert meter stats data %s on switch %s", data, switchId), e);
        return null;
    }
}
|
OfMeterStatsMapper { public MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId) { try { List<MeterStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toMeterStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new MeterStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert meter stats data %s on switch %s", data, switchId), e); return null; } } }
|
OfMeterStatsMapper { public MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId) { try { List<MeterStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toMeterStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new MeterStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert meter stats data %s on switch %s", data, switchId), e); return null; } } }
|
OfMeterStatsMapper { public MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId) { try { List<MeterStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toMeterStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new MeterStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert meter stats data %s on switch %s", data, switchId), e); return null; } } MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId); MeterStatsEntry toMeterStatsEntry(OFMeterStats entry); }
|
OfMeterStatsMapper { public MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId) { try { List<MeterStatsEntry> stats = data.stream() .flatMap(reply -> reply.getEntries().stream()) .map(this::toMeterStatsEntry) .filter(Objects::nonNull) .collect(toList()); return new MeterStatsData(switchId, stats); } catch (NullPointerException | UnsupportedOperationException | IllegalArgumentException e) { log.error(String.format("Could not convert meter stats data %s on switch %s", data, switchId), e); return null; } } MeterStatsData toMeterStatsData(List<OFMeterStatsReply> data, SwitchId switchId); MeterStatsEntry toMeterStatsEntry(OFMeterStats entry); static final OfMeterStatsMapper INSTANCE; }
|
/** switchAdded on a reachable switch must emit an ADDED event carrying the switch view. */
@Test
public void switchAdded() throws Exception {
    SpeakerSwitchView expectedSwitchView = makeSwitchRecord(dpId, switchFeatures, true, true);
    Capture<Message> producedMessage = prepareAliveSwitchEvent(expectedSwitchView);
    replayAll();
    service.switchAdded(dpId);
    verifySwitchEvent(SwitchChangeType.ADDED, expectedSwitchView, producedMessage);
}
|
/**
 * Floodlight callback: a switch has appeared. Logs the event to the dashboard
 * and emits an ADDED discovery notification.
 */
@Override
@NewCorrelationContextRequired
public void switchAdded(final DatapathId switchId) {
    dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED);
    switchDiscovery(switchId, SwitchChangeType.ADDED);
}
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
/**
 * If the switch record cannot be built (switch lookup fails), switchAdded must
 * still emit an ADDED event — with a null switch view.
 */
@Test
public void switchAddedMissing() throws Exception {
    Capture<Message> producedMessage = prepareRemovedSwitchEvent();
    replayAll();
    service.switchAdded(dpId);
    verifySwitchEvent(SwitchChangeType.ADDED, null, producedMessage);
}
|
/**
 * Floodlight callback: a switch has appeared. Logs the event to the dashboard
 * and emits an ADDED discovery notification.
 */
@Override
@NewCorrelationContextRequired
public void switchAdded(final DatapathId switchId) {
    dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED);
    switchDiscovery(switchId, SwitchChangeType.ADDED);
}
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchAdded(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.ADDED); switchDiscovery(switchId, SwitchChangeType.ADDED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
/** switchRemoved must deactivate the switch in the manager and emit a REMOVED event. */
@Test
public void switchRemoved() {
    Capture<Message> producedMessage = prepareSwitchEventCommon(dpId);
    switchManager.deactivate(eq(dpId));
    replayAll();
    service.switchRemoved(dpId);
    verifySwitchEvent(SwitchChangeType.REMOVED, null, producedMessage);
}
|
/**
 * Floodlight callback: a switch disconnected. Deactivates it in the switch
 * manager before emitting the REMOVED discovery notification.
 */
@Override
@NewCorrelationContextRequired
public void switchRemoved(final DatapathId switchId) {
    dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.REMOVED);
    switchManager.deactivate(switchId);
    switchDiscovery(switchId, SwitchChangeType.REMOVED);
}
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchRemoved(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.REMOVED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.REMOVED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchRemoved(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.REMOVED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.REMOVED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchRemoved(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.REMOVED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.REMOVED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchRemoved(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.REMOVED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.REMOVED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
/** switchDeactivated must deactivate the switch in the manager and emit a DEACTIVATED event. */
@Test
public void switchDeactivated() {
    Capture<Message> producedMessage = prepareSwitchEventCommon(dpId);
    switchManager.deactivate(eq(dpId));
    replayAll();
    service.switchDeactivated(dpId);
    verifySwitchEvent(SwitchChangeType.DEACTIVATED, null, producedMessage);
}
|
/**
 * Floodlight callback: a switch went down. Deactivates it in the switch
 * manager before emitting the DEACTIVATED discovery notification.
 */
@Override
@NewCorrelationContextRequired
public void switchDeactivated(final DatapathId switchId) {
    dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.DEACTIVATED);
    switchManager.deactivate(switchId);
    switchDiscovery(switchId, SwitchChangeType.DEACTIVATED);
}
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchDeactivated(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.DEACTIVATED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.DEACTIVATED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchDeactivated(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.DEACTIVATED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.DEACTIVATED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchDeactivated(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.DEACTIVATED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.DEACTIVATED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchDeactivated(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.DEACTIVATED); switchManager.deactivate(switchId); switchDiscovery(switchId, SwitchChangeType.DEACTIVATED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
@Test public void shouldPatchSwitch() throws SwitchNotFoundException { Switch sw = Switch.builder().switchId(TEST_SWITCH_ID).status(SwitchStatus.ACTIVE).build(); switchRepository.add(sw); SwitchPatch switchPatch = new SwitchPatch("pop", new SwitchLocation(48.860611, 2.337633, "street", "city", "country")); switchOperationsService.patchSwitch(TEST_SWITCH_ID, switchPatch); Switch updatedSwitch = switchRepository.findById(TEST_SWITCH_ID).get(); assertEquals(switchPatch.getPop(), updatedSwitch.getPop()); assertEquals(switchPatch.getLocation().getLatitude(), updatedSwitch.getLatitude()); assertEquals(switchPatch.getLocation().getLongitude(), updatedSwitch.getLongitude()); assertEquals(switchPatch.getLocation().getStreet(), updatedSwitch.getStreet()); assertEquals(switchPatch.getLocation().getCity(), updatedSwitch.getCity()); assertEquals(switchPatch.getLocation().getCountry(), updatedSwitch.getCountry()); }
|
public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices(
SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices(
SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
|
@Test public void switchChanged() throws Exception { SpeakerSwitchView expectedSwitchRecord = makeSwitchRecord(dpId, switchFeatures, true, true); Capture<Message> producedMessage = prepareAliveSwitchEvent(expectedSwitchRecord); replayAll(); service.switchChanged(dpId); verifySwitchEvent(SwitchChangeType.CHANGED, expectedSwitchRecord, producedMessage); }
|
@Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
@Test public void switchChangedMissing() throws Exception { Capture<Message> producedMessage = prepareRemovedSwitchEvent(); replayAll(); service.switchChanged(dpId); verifySwitchEvent(SwitchChangeType.CHANGED, null, producedMessage); }
|
@Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
SwitchTrackingService implements IOFSwitchListener, IService { @Override @NewCorrelationContextRequired public void switchChanged(final DatapathId switchId) { dashboardLogger.onSwitchEvent(switchId, SwitchChangeType.CHANGED); switchDiscovery(switchId, SwitchChangeType.CHANGED); } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
@Test public void networkDumpTest() throws Exception { OFSwitch iofSwitch1 = mock(OFSwitch.class); OFSwitch iofSwitch2 = mock(OFSwitch.class); final DatapathId swAid = DatapathId.of(1); final DatapathId swBid = DatapathId.of(2); Map<DatapathId, IOFSwitch> switches = ImmutableMap.of( swAid, iofSwitch1, swBid, iofSwitch2 ); Map<DatapathId, InetSocketAddress> switchAddresses = ImmutableMap.of( swAid, new InetSocketAddress(Inet4Address.getByName("127.0.1.1"), 32768), swBid, new InetSocketAddress(Inet4Address.getByName("127.0.1.2"), 32768) ); SwitchDescription ofSwitchDescription = new SwitchDescription( switchDescription.getManufacturer(), switchDescription.getHardware(), switchDescription.getSoftware(), switchDescription.getSerialNumber(), switchDescription.getDatapath()); OFFactoryVer13 ofFactory = new OFFactoryVer13(); for (DatapathId swId : switches.keySet()) { IOFSwitch sw = switches.get(swId); expect(sw.getOFFactory()).andStubReturn(ofFactory); expect(sw.isActive()).andReturn(true).anyTimes(); expect(sw.getId()).andReturn(swId).anyTimes(); expect(sw.getSwitchDescription()).andReturn(ofSwitchDescription); expect(sw.getInetAddress()).andReturn(switchAddresses.get(swId)); expect(sw.getControllerRole()).andStubReturn(OFControllerRole.ROLE_EQUAL); OFConnection connect = createMock(OFConnection.class); expect(connect.getRemoteInetAddress()).andReturn(speakerSocketAddress); expect(sw.getConnectionByCategory(eq(LogicalOFMessageCategory.MAIN))).andReturn(connect); } expect(switchManager.getAllSwitchMap(true)).andReturn(switches); expect(switchManager.getPhysicalPorts(eq(iofSwitch1))).andReturn(ImmutableList.of( makePhysicalPortMock(1, true), makePhysicalPortMock(2, true) )); expect(switchManager.getPhysicalPorts(eq(iofSwitch2))).andReturn(ImmutableList.of( makePhysicalPortMock(3, true), makePhysicalPortMock(4, true), makePhysicalPortMock(5, false) )); expect(featureDetector.detectSwitch(iofSwitch1)) .andReturn(ImmutableSet.of(SwitchFeature.METERS)); 
expect(featureDetector.detectSwitch(iofSwitch2)) .andReturn(ImmutableSet.of(SwitchFeature.METERS, SwitchFeature.BFD)); ArrayList<Message> producedMessages = new ArrayList<>(); producerService.sendMessageAndTrack(eq(KAFKA_ISL_DISCOVERY_TOPIC), anyObject(), anyObject(InfoMessage.class)); expectLastCall().andAnswer(new IAnswer<Object>() { @Override public Object answer() { Message sentMessage = (Message) getCurrentArguments()[2]; sentMessage.setTimestamp(0); producedMessages.add(sentMessage); return null; } }).anyTimes(); replayAll(); String correlationId = "unit-test-correlation-id"; try (CorrelationContextClosable dummy = CorrelationContext.create(correlationId)) { service.dumpAllSwitches(); } verify(producerService); ArrayList<Message> expectedMessages = new ArrayList<>(); expectedMessages.add(new InfoMessage( new NetworkDumpSwitchData(new SpeakerSwitchView( new SwitchId(swAid.getLong()), new InetSocketAddress(Inet4Address.getByName("127.0.1.1"), 32768), new InetSocketAddress(Inet4Address.getByName("127.0.1.254"), 6653), "OF_13", switchDescription, ImmutableSet.of(SwitchFeature.METERS), ImmutableList.of( new SpeakerSwitchPortView(1, SpeakerSwitchPortView.State.UP), new SpeakerSwitchPortView(2, SpeakerSwitchPortView.State.UP))), true), 0, correlationId)); expectedMessages.add(new InfoMessage( new NetworkDumpSwitchData(new SpeakerSwitchView( new SwitchId(swBid.getLong()), new InetSocketAddress(Inet4Address.getByName("127.0.1.2"), 32768), new InetSocketAddress(Inet4Address.getByName("127.0.1.254"), 6653), "OF_13", switchDescription, ImmutableSet.of(SwitchFeature.METERS, SwitchFeature.BFD), ImmutableList.of( new SpeakerSwitchPortView(3, SpeakerSwitchPortView.State.UP), new SpeakerSwitchPortView(4, SpeakerSwitchPortView.State.UP), new SpeakerSwitchPortView(5, SpeakerSwitchPortView.State.DOWN))), true), 0, correlationId)); assertEquals(expectedMessages, producedMessages); }
|
public void dumpAllSwitches() { discoveryLock.writeLock().lock(); try { dumpAllSwitchesAction(); } finally { discoveryLock.writeLock().unlock(); } }
|
SwitchTrackingService implements IOFSwitchListener, IService { public void dumpAllSwitches() { discoveryLock.writeLock().lock(); try { dumpAllSwitchesAction(); } finally { discoveryLock.writeLock().unlock(); } } }
|
SwitchTrackingService implements IOFSwitchListener, IService { public void dumpAllSwitches() { discoveryLock.writeLock().lock(); try { dumpAllSwitchesAction(); } finally { discoveryLock.writeLock().unlock(); } } }
|
SwitchTrackingService implements IOFSwitchListener, IService { public void dumpAllSwitches() { discoveryLock.writeLock().lock(); try { dumpAllSwitchesAction(); } finally { discoveryLock.writeLock().unlock(); } } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
SwitchTrackingService implements IOFSwitchListener, IService { public void dumpAllSwitches() { discoveryLock.writeLock().lock(); try { dumpAllSwitchesAction(); } finally { discoveryLock.writeLock().unlock(); } } void dumpAllSwitches(); void completeSwitchActivation(DatapathId dpId); @Override @NewCorrelationContextRequired void switchAdded(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchRemoved(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchActivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchDeactivated(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchChanged(final DatapathId switchId); @Override @NewCorrelationContextRequired void switchPortChanged(final DatapathId switchId, final OFPortDesc portDesc, final PortChangeType type); @Override void setup(FloodlightModuleContext context); }
|
@Test public void installDropRule() throws Exception { Capture<OFFlowMod> capture = prepareForInstallTest(); switchManager.installDropFlow(defaultDpid); assertEquals(scheme.installDropFlowRule(), capture.getValue()); }
|
@Override public Long installDropFlow(final DatapathId dpid) throws SwitchOperationException { return installDropFlowForTable(dpid, INPUT_TABLE_ID, DROP_RULE_COOKIE); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlow(final DatapathId dpid) throws SwitchOperationException { return installDropFlowForTable(dpid, INPUT_TABLE_ID, DROP_RULE_COOKIE); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlow(final DatapathId dpid) throws SwitchOperationException { return installDropFlowForTable(dpid, INPUT_TABLE_ID, DROP_RULE_COOKIE); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlow(final DatapathId dpid) throws SwitchOperationException { return installDropFlowForTable(dpid, INPUT_TABLE_ID, DROP_RULE_COOKIE); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlow(final DatapathId dpid) throws SwitchOperationException { return installDropFlowForTable(dpid, INPUT_TABLE_ID, DROP_RULE_COOKIE); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int 
SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
|
@Test public void installVerificationUnicastRule() throws Exception { mockGetMetersRequest(Lists.newArrayList(broadcastMeterId), true, 10L); mockBarrierRequest(); expect(iofSwitch.write(anyObject(OFMeterMod.class))).andReturn(true).times(1); Capture<OFFlowMod> capture = prepareForInstallTest(); switchManager.installVerificationRule(defaultDpid, false); assertEquals(scheme.installVerificationUnicastRule(defaultDpid), capture.getValue()); }
|
@Override public Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast) throws SwitchOperationException { String ruleName = (isBroadcast) ? "Broadcast" : "Unicast"; String flowName = ruleName + "--VerificationFlow--"; return installDefaultFlow(dpid, switchFlowFactory.getVerificationFlow(isBroadcast), flowName); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast) throws SwitchOperationException { String ruleName = (isBroadcast) ? "Broadcast" : "Unicast"; String flowName = ruleName + "--VerificationFlow--"; return installDefaultFlow(dpid, switchFlowFactory.getVerificationFlow(isBroadcast), flowName); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast) throws SwitchOperationException { String ruleName = (isBroadcast) ? "Broadcast" : "Unicast"; String flowName = ruleName + "--VerificationFlow--"; return installDefaultFlow(dpid, switchFlowFactory.getVerificationFlow(isBroadcast), flowName); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast) throws SwitchOperationException { String ruleName = (isBroadcast) ? "Broadcast" : "Unicast"; String flowName = ruleName + "--VerificationFlow--"; return installDefaultFlow(dpid, switchFlowFactory.getVerificationFlow(isBroadcast), flowName); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast) throws SwitchOperationException { String ruleName = (isBroadcast) ? "Broadcast" : "Unicast"; String flowName = ruleName + "--VerificationFlow--"; return installDefaultFlow(dpid, switchFlowFactory.getVerificationFlow(isBroadcast), flowName); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int 
SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
|
@Test public void installDropLoopRule() throws Exception { Capture<OFFlowMod> capture = prepareForInstallTest(); switchManager.installDropLoopRule(dpid); OFFlowMod result = capture.getValue(); assertEquals(scheme.installDropLoopRule(dpid), result); }
|
@Override public Long installDropLoopRule(DatapathId dpid) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropLoopFlowGenerator(), "--DropLoopRule--"); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropLoopRule(DatapathId dpid) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropLoopFlowGenerator(), "--DropLoopRule--"); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropLoopRule(DatapathId dpid) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropLoopFlowGenerator(), "--DropLoopRule--"); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropLoopRule(DatapathId dpid) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropLoopFlowGenerator(), "--DropLoopRule--"); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropLoopRule(DatapathId dpid) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropLoopFlowGenerator(), "--DropLoopRule--"); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int 
SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
|
@Test public void installDropFlowForTable() throws Exception { Capture<OFFlowMod> capture = prepareForInstallTest(); switchManager.installDropFlowForTable(dpid, 1, DROP_RULE_COOKIE); OFFlowMod result = capture.getValue(); assertEquals(scheme.installDropFlowForTable(dpid, 1, DROP_RULE_COOKIE), result); }
|
@Override public Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropFlowGenerator(cookie, tableId), "--DropRule--"); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropFlowGenerator(cookie, tableId), "--DropRule--"); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropFlowGenerator(cookie, tableId), "--DropRule--"); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropFlowGenerator(cookie, tableId), "--DropRule--"); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public Long installDropFlowForTable(final DatapathId dpid, final int tableId, final long cookie) throws SwitchOperationException { return installDefaultFlow(dpid, switchFlowFactory.getDropFlowGenerator(cookie, tableId), "--DropRule--"); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int 
SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
|
@Test public void installEgressIslVxlanRule() throws Exception { Capture<OFFlowMod> capture = prepareForInstallTest(); switchManager.installEgressIslVxlanRule(dpid, 1); OFFlowMod result = capture.getValue(); assertEquals(scheme.installEgressIslVxlanRule(dpid, 1), result); }
|
@Override public long installEgressIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { /* Install the ISL egress rule for VXLAN-encapsulated traffic on the given switch/port and return the rule's cookie. Throws SwitchOperationException if the switch cannot be found or the flow push fails. */ IOFSwitch targetSwitch = lookupSwitch(dpid); OFFlowMod egressRule = buildEgressIslVxlanRule(targetSwitch.getOFFactory(), dpid, port); pushFlow(targetSwitch, "--Isl egress rule for VXLAN--" + dpid.toString(), egressRule); return egressRule.getCookie().getValue(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVxlanRule(ofFactory, dpid, port); String flowName = "--Isl egress rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVxlanRule(ofFactory, dpid, port); String flowName = "--Isl egress rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVxlanRule(ofFactory, dpid, port); String flowName = "--Isl egress rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVxlanRule(ofFactory, dpid, port); String flowName = "--Isl egress rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int 
SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
|
@Test public void installTransitIslVxlanRule() throws Exception { Capture<OFFlowMod> capture = prepareForInstallTest(); switchManager.installTransitIslVxlanRule(dpid, 1); OFFlowMod result = capture.getValue(); assertEquals(scheme.installTransitIslVxlanRule(dpid, 1), result); }
|
@Override public long installTransitIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { /* Install the ISL transit rule for VXLAN-encapsulated traffic on the given switch/port and return the rule's cookie. Throws SwitchOperationException if the switch cannot be found or the flow push fails. Note: unlike the egress variant, the builder does not need the dpid. */ IOFSwitch targetSwitch = lookupSwitch(dpid); OFFlowMod transitRule = buildTransitIslVxlanRule(targetSwitch.getOFFactory(), port); pushFlow(targetSwitch, "--Isl transit rule for VXLAN--" + dpid.toString(), transitRule); return transitRule.getCookie().getValue(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installTransitIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildTransitIslVxlanRule(ofFactory, port); String flowName = "--Isl transit rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installTransitIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildTransitIslVxlanRule(ofFactory, port); String flowName = "--Isl transit rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installTransitIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildTransitIslVxlanRule(ofFactory, port); String flowName = "--Isl transit rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installTransitIslVxlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildTransitIslVxlanRule(ofFactory, port); String flowName = "--Isl transit rule for VXLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int 
SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
|
// Verifies that installEgressIslVlanRule() pushes exactly the flow mod the reference scheme builds
// for the same switch and port.
@Test
public void installEgressIslVlanRule() throws Exception {
    Capture<OFFlowMod> flowModCapture = prepareForInstallTest();
    switchManager.installEgressIslVlanRule(dpid, 1);
    OFFlowMod pushedFlowMod = flowModCapture.getValue();
    assertEquals(scheme.installEgressIslVlanRule(dpid, 1), pushedFlowMod);
}
|
/**
 * Installs the egress ISL VLAN rule on the given switch for the given port.
 *
 * @param dpid datapath id of the target switch
 * @param port ISL port the rule is built for
 * @return the cookie value of the installed flow mod, so callers can identify the rule later
 * @throws SwitchOperationException if the switch cannot be resolved or the flow cannot be pushed
 */
@Override
public long installEgressIslVlanRule(DatapathId dpid, int port) throws SwitchOperationException {
    IOFSwitch sw = lookupSwitch(dpid);
    // Build the rule against the switch's own OF factory so the message matches its OF version.
    OFFlowMod egressVlanFlow = buildEgressIslVlanRule(sw.getOFFactory(), port);
    pushFlow(sw, "--Isl egress rule for VLAN--" + dpid.toString(), egressVlanFlow);
    return egressVlanFlow.getCookie().getValue();
}
|
// NOTE(review): machine-generated dataset record — SwitchManager.installEgressIslVlanRule() wrapped in
// an abbreviated class skeleton (no 'class' keyword); not compilable as-is, kept byte-identical.
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVlanRule(ofFactory, port); String flowName = "--Isl egress rule for VLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } }
|
// NOTE(review): duplicate machine-generated dataset record of the same method/class skeleton as the
// previous row; not compilable as-is, kept byte-identical.
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVlanRule(ofFactory, port); String flowName = "--Isl egress rule for VLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVlanRule(ofFactory, port); String flowName = "--Isl egress rule for VLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); }
|
SwitchManager implements IFloodlightModule, IFloodlightService, ISwitchManager, IOFMessageListener { @Override public long installEgressIslVlanRule(DatapathId dpid, int port) throws SwitchOperationException { IOFSwitch sw = lookupSwitch(dpid); OFFactory ofFactory = sw.getOFFactory(); OFFlowMod flowMod = buildEgressIslVlanRule(ofFactory, port); String flowName = "--Isl egress rule for VLAN--" + dpid.toString(); pushFlow(sw, flowName, flowMod); return flowMod.getCookie().getValue(); } @Override Collection<Class<? extends IFloodlightService>> getModuleServices(); @Override Map<Class<? extends IFloodlightService>, IFloodlightService> getServiceImpls(); @Override Collection<Class<? extends IFloodlightService>> getModuleDependencies(); @Override void init(FloodlightModuleContext context); @Override void startUp(FloodlightModuleContext context); @Override @NewCorrelationContextRequired Command receive(IOFSwitch sw, OFMessage msg, FloodlightContext cntx); @Override String getName(); @Override void activate(DatapathId dpid); @Override void deactivate(DatapathId dpid); @Override boolean isCallbackOrderingPrereq(OFType type, String name); @Override boolean isCallbackOrderingPostreq(OFType type, String name); @Override ConnectModeRequest.Mode connectMode(final ConnectModeRequest.Mode mode); @Override List<Long> installDefaultRules(final DatapathId dpid); @Override long installIngressFlow(DatapathId dpid, DatapathId dstDpid, String flowId, Long cookie, int inputPort,
int outputPort, int inputVlanId, int transitTunnelId, OutputVlanType outputVlanType,
long meterId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installServer42IngressFlow(
DatapathId dpid, DatapathId dstDpid, Long cookie, org.openkilda.model.MacAddress server42MacAddress,
int server42Port, int outputPort, int customerPort, int inputVlanId, int transitTunnelId,
OutputVlanType outputVlanType, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installEgressFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, int outputVlanId, OutputVlanType outputVlanType,
FlowEncapsulationType encapsulationType,
boolean multiTable); @Override long installTransitFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int transitTunnelId, FlowEncapsulationType encapsulationType, boolean multiTable); @Override long installOneSwitchFlow(DatapathId dpid, String flowId, Long cookie, int inputPort, int outputPort,
int inputVlanId, int outputVlanId, OutputVlanType outputVlanType, long meterId,
boolean multiTable); @Override void installOuterVlanMatchSharedFlow(SwitchId switchId, String flowId, FlowSharedSegmentCookie cookie); @Override List<OFFlowMod> getExpectedDefaultFlows(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<MeterEntry> getExpectedDefaultMeters(DatapathId dpid, boolean multiTable, boolean switchLldp,
boolean switchArp); @Override List<OFFlowMod> getExpectedIslFlowsForPort(DatapathId dpid, int port); @Override List<OFFlowStatsEntry> dumpFlowTable(final DatapathId dpid); @Override List<OFMeterConfig> dumpMeters(final DatapathId dpid); @Override OFMeterConfig dumpMeterById(final DatapathId dpid, final long meterId); @Override void installMeterForFlow(DatapathId dpid, long bandwidth, final long meterId); @Override void modifyMeterForFlow(DatapathId dpid, long meterId, long bandwidth); @Override Map<DatapathId, IOFSwitch> getAllSwitchMap(boolean visible); @Override void deleteMeter(final DatapathId dpid, final long meterId); @Override List<Long> deleteAllNonDefaultRules(final DatapathId dpid); @Override List<Long> deleteRulesByCriteria(DatapathId dpid, boolean multiTable, RuleType ruleType,
DeleteRulesCriteria... criteria); @Override List<Long> deleteDefaultRules(DatapathId dpid, List<Integer> islPorts,
List<Integer> flowPorts, Set<Integer> flowLldpPorts,
Set<Integer> flowArpPorts, Set<Integer> server42FlowRttPorts,
boolean multiTable, boolean switchLldp, boolean switchArp,
boolean server42FlowRtt); @Override Long installUnicastVerificationRuleVxlan(final DatapathId dpid); @Override Long installVerificationRule(final DatapathId dpid, final boolean isBroadcast); @Override List<OFGroupDescStatsEntry> dumpGroups(DatapathId dpid); @Override void installDropFlowCustom(final DatapathId dpid, String dstMac, String dstMask,
final long cookie, final int priority); @Override Long installDropFlow(final DatapathId dpid); @Override Long installDropFlowForTable(final DatapathId dpid, final int tableId,
final long cookie); @Override Long installBfdCatchFlow(DatapathId dpid); @Override Long installRoundTripLatencyFlow(DatapathId dpid); @Override long installEgressIslVxlanRule(DatapathId dpid, int port); @Override long removeEgressIslVxlanRule(DatapathId dpid, int port); @Override long installTransitIslVxlanRule(DatapathId dpid, int port); @Override long removeTransitIslVxlanRule(DatapathId dpid, int port); @Override long installEgressIslVlanRule(DatapathId dpid, int port); Long installLldpTransitFlow(DatapathId dpid); @Override Long installLldpInputPreDropFlow(DatapathId dpid); @Override Long installArpTransitFlow(DatapathId dpid); @Override Long installArpInputPreDropFlow(DatapathId dpid); @Override Long installServer42InputFlow(DatapathId dpid, int server42Port, int customerPort,
org.openkilda.model.MacAddress server42macAddress); @Override Long installServer42TurningFlow(DatapathId dpid); @Override Long installServer42OutputVlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override Long installServer42OutputVxlanFlow(
DatapathId dpid, int port, int vlan, org.openkilda.model.MacAddress macAddress); @Override long removeEgressIslVlanRule(DatapathId dpid, int port); @Override long installIntermediateIngressRule(DatapathId dpid, int port); @Override long removeIntermediateIngressRule(DatapathId dpid, int port); @Override long removeLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeArpInputCustomerFlow(DatapathId dpid, int port); @Override Long removeServer42InputFlow(DatapathId dpid, int port); @Override OFFlowMod buildIntermediateIngressRule(DatapathId dpid, int port); @Override long installLldpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildLldpInputCustomerFlow(DatapathId dpid, int port); @Override Long installLldpIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressFlow(DatapathId dpid); @Override Long installLldpPostIngressVxlanFlow(DatapathId dpid); @Override Long installLldpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installArpInputCustomerFlow(DatapathId dpid, int port); @Override OFFlowMod buildArpInputCustomerFlow(DatapathId dpid, int port); @Override List<OFFlowMod> buildExpectedServer42Flows(
DatapathId dpid, int server42Port, int server42Vlan, org.openkilda.model.MacAddress server42MacAddress,
Set<Integer> customerPorts); @Override Long installArpIngressFlow(DatapathId dpid); @Override Long installArpPostIngressFlow(DatapathId dpid); @Override Long installArpPostIngressVxlanFlow(DatapathId dpid); @Override Long installArpPostIngressOneSwitchFlow(DatapathId dpid); @Override Long installPreIngressTablePassThroughDefaultRule(DatapathId dpid); @Override Long installEgressTablePassThroughDefaultRule(DatapathId dpid); @Override List<Long> installMultitableEndpointIslRules(DatapathId dpid, int port); @Override List<Long> removeMultitableEndpointIslRules(DatapathId dpid, int port); @Override Long installDropLoopRule(DatapathId dpid); @Override IOFSwitch lookupSwitch(DatapathId dpId); @Override InetAddress getSwitchIpAddress(IOFSwitch sw); @Override List<OFPortDesc> getEnabledPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(DatapathId dpId); @Override List<OFPortDesc> getPhysicalPorts(IOFSwitch sw); @Override void safeModeTick(); @Override void configurePort(DatapathId dpId, int portNumber, Boolean portAdminDown); @Override List<OFPortDesc> dumpPortsDescription(DatapathId dpid); @Override SwitchManagerConfig getSwitchManagerConfig(); static final long FLOW_COOKIE_MASK; static final int VERIFICATION_RULE_PRIORITY; static final int VERIFICATION_RULE_VXLAN_PRIORITY; static final int DROP_VERIFICATION_LOOP_RULE_PRIORITY; static final int CATCH_BFD_RULE_PRIORITY; static final int ROUND_TRIP_LATENCY_RULE_PRIORITY; static final int FLOW_PRIORITY; static final int ISL_EGRESS_VXLAN_RULE_PRIORITY_MULTITABLE; static final int ISL_TRANSIT_VXLAN_RULE_PRIORITY_MULTITABLE; static final int INGRESS_CUSTOMER_PORT_RULE_PRIORITY_MULTITABLE; static final int ISL_EGRESS_VLAN_RULE_PRIORITY_MULTITABLE; static final int DEFAULT_FLOW_PRIORITY; static final int MINIMAL_POSITIVE_PRIORITY; static final int SERVER_42_INPUT_PRIORITY; static final int SERVER_42_TURNING_PRIORITY; static final int SERVER_42_OUTPUT_VLAN_PRIORITY; static final int 
SERVER_42_OUTPUT_VXLAN_PRIORITY; static final int LLDP_INPUT_PRE_DROP_PRIORITY; static final int LLDP_TRANSIT_ISL_PRIORITY; static final int LLDP_INPUT_CUSTOMER_PRIORITY; static final int LLDP_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_PRIORITY; static final int LLDP_POST_INGRESS_VXLAN_PRIORITY; static final int LLDP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int ARP_INPUT_PRE_DROP_PRIORITY; static final int ARP_TRANSIT_ISL_PRIORITY; static final int ARP_INPUT_CUSTOMER_PRIORITY; static final int ARP_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_PRIORITY; static final int ARP_POST_INGRESS_VXLAN_PRIORITY; static final int ARP_POST_INGRESS_ONE_SWITCH_PRIORITY; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY_OFFSET; static final int SERVER_42_INGRESS_DEFAULT_FLOW_PRIORITY; static final int BDF_DEFAULT_PORT; static final int ROUND_TRIP_LATENCY_GROUP_ID; static final MacAddress STUB_VXLAN_ETH_DST_MAC; static final IPv4Address STUB_VXLAN_IPV4_SRC; static final IPv4Address STUB_VXLAN_IPV4_DST; static final int STUB_VXLAN_UDP_SRC; static final int ARP_VXLAN_UDP_SRC; static final int SERVER_42_FORWARD_UDP_PORT; static final int SERVER_42_REVERSE_UDP_PORT; static final int VXLAN_UDP_DST; static final int ETH_SRC_OFFSET; static final int INTERNAL_ETH_SRC_OFFSET; static final int MAC_ADDRESS_SIZE_IN_BITS; static final int TABLE_1; static final int INPUT_TABLE_ID; static final int PRE_INGRESS_TABLE_ID; static final int INGRESS_TABLE_ID; static final int POST_INGRESS_TABLE_ID; static final int EGRESS_TABLE_ID; static final int TRANSIT_TABLE_ID; static final int NOVIFLOW_TIMESTAMP_SIZE_IN_BITS; }
|
// Patching a switch with an empty-string POP must clear (null out) the stored POP value.
@Test
public void shouldSetNullPopWhenPopIsEmptyString() throws SwitchNotFoundException {
    Switch sw = Switch.builder().switchId(TEST_SWITCH_ID).status(SwitchStatus.ACTIVE).build();
    switchRepository.add(sw);
    switchOperationsService.patchSwitch(TEST_SWITCH_ID, new SwitchPatch("", null));
    Switch patched = switchRepository.findById(TEST_SWITCH_ID).get();
    assertNull(patched.getPop());
}
|
public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices(
SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
|
SwitchOperationsService implements ILinkOperationsServiceCarrier { public Switch patchSwitch(SwitchId switchId, SwitchPatch data) throws SwitchNotFoundException { return transactionManager.doInTransaction(() -> { Switch foundSwitch = switchRepository.findById(switchId) .orElseThrow(() -> new SwitchNotFoundException(switchId)); Optional.ofNullable(data.getPop()).ifPresent(pop -> foundSwitch.setPop(!"".equals(pop) ? pop : null)); Optional.ofNullable(data.getLocation()).ifPresent(location -> { Optional.ofNullable(location.getLatitude()).ifPresent(foundSwitch::setLatitude); Optional.ofNullable(location.getLongitude()).ifPresent(foundSwitch::setLongitude); Optional.ofNullable(location.getStreet()).ifPresent(foundSwitch::setStreet); Optional.ofNullable(location.getCity()).ifPresent(foundSwitch::setCity); Optional.ofNullable(location.getCountry()).ifPresent(foundSwitch::setCountry); }); switchRepository.detach(foundSwitch); return foundSwitch; }); } SwitchOperationsService(RepositoryFactory repositoryFactory,
TransactionManager transactionManager,
SwitchOperationsServiceCarrier carrier); GetSwitchResponse getSwitch(SwitchId switchId); List<GetSwitchResponse> getAllSwitches(); Switch updateSwitchUnderMaintenanceFlag(SwitchId switchId, boolean underMaintenance); boolean deleteSwitch(SwitchId switchId, boolean force); void checkSwitchIsDeactivated(SwitchId switchId); void checkSwitchHasNoFlows(SwitchId switchId); void checkSwitchHasNoFlowSegments(SwitchId switchId); void checkSwitchHasNoIsls(SwitchId switchId); SwitchPropertiesDto getSwitchProperties(SwitchId switchId); SwitchPropertiesDto updateSwitchProperties(SwitchId switchId, SwitchPropertiesDto switchPropertiesDto); PortProperties getPortProperties(SwitchId switchId, int port); Collection<SwitchConnectedDevice> getSwitchConnectedDevices(
SwitchId switchId); List<IslEndpoint> getSwitchIslEndpoints(SwitchId switchId); Switch patchSwitch(SwitchId switchId, SwitchPatch data); }
|
@Test public void testConnectSchemaMetadataTranslation() { assertEquals(new SchemaAndValue(Schema.BOOLEAN_SCHEMA, true), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\" }, \"payload\": true }".getBytes())); assertEquals(new SchemaAndValue(Schema.OPTIONAL_BOOLEAN_SCHEMA, null), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": true }, \"payload\": null }".getBytes())); assertEquals(new SchemaAndValue(SchemaBuilder.bool().defaultValue(true).build(), true), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"default\": true }, \"payload\": null }".getBytes())); assertEquals(new SchemaAndValue(SchemaBuilder.bool().required().name("bool").version(2).doc("the documentation").parameter("foo", "bar").build(), true), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"boolean\", \"optional\": false, \"name\": \"bool\", \"version\": 2, \"doc\": \"the documentation\", \"parameters\": { \"foo\": \"bar\" }}, \"payload\": true }".getBytes())); }
|
@Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); }
|
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
|
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } }
|
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
JsonConverter implements Converter { @Override public SchemaAndValue toConnectData(String topic, byte[] value) { JsonNode jsonValue; try { jsonValue = deserializer.deserialize(topic, value); } catch (SerializationException e) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } if (enableSchemas && (jsonValue == null || !jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has("schema") || !jsonValue.has("payload"))) throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); if (!enableSchemas) { ObjectNode envelope = JsonNodeFactory.instance.objectNode(); envelope.set("schema", null); envelope.set("payload", jsonValue); jsonValue = envelope; } return jsonToConnect(jsonValue); } @Override void configure(Map<String, ?> configs, boolean isKey); @Override byte[] fromConnectData(String topic, Schema schema, Object value); @Override SchemaAndValue toConnectData(String topic, byte[] value); ObjectNode asJsonSchema(Schema schema); Schema asConnectSchema(JsonNode jsonSchema); }
|
@Test public void shouldRebalanceTasksToClientsBasedOnCapacity() throws Exception { createClientWithPreviousActiveTasks(p2, 1, task00, task03, task02); createClient(p3, 2); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task03); taskAssignor.assign(0); assertThat(clients.get(p2).assignedTaskCount(), equalTo(1)); assertThat(clients.get(p3).assignedTaskCount(), equalTo(2)); }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
@Test public void shouldMoveMinimalNumberOfTasksWhenPreviouslyAboveCapacityAndNewClientAdded() throws Exception { final Set<TaskId> p1PrevTasks = Utils.mkSet(task00, task02); final Set<TaskId> p2PrevTasks = Utils.mkSet(task01, task03); createClientWithPreviousActiveTasks(p1, 1, task00, task02); createClientWithPreviousActiveTasks(p2, 1, task01, task03); createClientWithPreviousActiveTasks(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task02, task01, task03); taskAssignor.assign(0); final Set<TaskId> p3ActiveTasks = clients.get(p3).activeTasks(); assertThat(p3ActiveTasks.size(), equalTo(1)); if (p1PrevTasks.removeAll(p3ActiveTasks)) { assertThat(clients.get(p2).activeTasks(), equalTo(p2PrevTasks)); } else { assertThat(clients.get(p1).activeTasks(), equalTo(p1PrevTasks)); } }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
@Test public void shouldNotMoveAnyTasksWhenNewTasksAdded() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task00, task01); createClientWithPreviousActiveTasks(p2, 1, task02, task03); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task03, task01, task04, task02, task00, task05); taskAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), hasItems(task00, task01)); assertThat(clients.get(p2).activeTasks(), hasItems(task02, task03)); }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
@Test public void shouldAssignNewTasksToNewClientWhenPreviousTasksAssignedToOldClients() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task02, task01); createClientWithPreviousActiveTasks(p2, 1, task00, task03); createClient(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task03, task01, task04, task02, task00, task05); taskAssignor.assign(0); assertThat(clients.get(p1).activeTasks(), hasItems(task02, task01)); assertThat(clients.get(p2).activeTasks(), hasItems(task00, task03)); assertThat(clients.get(p3).activeTasks(), hasItems(task04, task05)); }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
@Test public void shouldAssignTasksNotPreviouslyActiveToNewClient() throws Exception { final TaskId task10 = new TaskId(0, 10); final TaskId task11 = new TaskId(0, 11); final TaskId task12 = new TaskId(1, 2); final TaskId task13 = new TaskId(1, 3); final TaskId task20 = new TaskId(2, 0); final TaskId task21 = new TaskId(2, 1); final TaskId task22 = new TaskId(2, 2); final TaskId task23 = new TaskId(2, 3); final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task01, task12, task13); c1.addPreviousStandbyTasks(Utils.mkSet(task00, task11, task20, task21, task23)); final ClientState c2 = createClientWithPreviousActiveTasks(p2, 1, task00, task11, task22); c2.addPreviousStandbyTasks(Utils.mkSet(task01, task10, task02, task20, task03, task12, task21, task13, task23)); final ClientState c3 = createClientWithPreviousActiveTasks(p3, 1, task20, task21, task23); c3.addPreviousStandbyTasks(Utils.mkSet(task02, task12)); final ClientState newClient = createClient(p4, 1); newClient.addPreviousStandbyTasks(Utils.mkSet(task00, task10, task01, task02, task11, task20, task03, task12, task21, task13, task22, task23)); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task10, task01, task02, task11, task20, task03, task12, task21, task13, task22, task23); taskAssignor.assign(0); assertThat(c1.activeTasks(), equalTo(Utils.mkSet(task01, task12, task13))); assertThat(c2.activeTasks(), equalTo(Utils.mkSet(task00, task11, task22))); assertThat(c3.activeTasks(), equalTo(Utils.mkSet(task20, task21, task23))); assertThat(newClient.activeTasks(), equalTo(Utils.mkSet(task02, task03, task10))); }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
@Test public void shouldAssignTasksNotPreviouslyActiveToMultipleNewClients() throws Exception { final TaskId task10 = new TaskId(0, 10); final TaskId task11 = new TaskId(0, 11); final TaskId task12 = new TaskId(1, 2); final TaskId task13 = new TaskId(1, 3); final TaskId task20 = new TaskId(2, 0); final TaskId task21 = new TaskId(2, 1); final TaskId task22 = new TaskId(2, 2); final TaskId task23 = new TaskId(2, 3); final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task01, task12, task13); c1.addPreviousStandbyTasks(Utils.mkSet(task00, task11, task20, task21, task23)); final ClientState c2 = createClientWithPreviousActiveTasks(p2, 1, task00, task11, task22); c2.addPreviousStandbyTasks(Utils.mkSet(task01, task10, task02, task20, task03, task12, task21, task13, task23)); final ClientState bounce1 = createClient(p3, 1); bounce1.addPreviousStandbyTasks(Utils.mkSet(task20, task21, task23)); final ClientState bounce2 = createClient(p4, 1); bounce2.addPreviousStandbyTasks(Utils.mkSet(task02, task03, task10)); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task10, task01, task02, task11, task20, task03, task12, task21, task13, task22, task23); taskAssignor.assign(0); assertThat(c1.activeTasks(), equalTo(Utils.mkSet(task01, task12, task13))); assertThat(c2.activeTasks(), equalTo(Utils.mkSet(task00, task11, task22))); assertThat(bounce1.activeTasks(), equalTo(Utils.mkSet(task20, task21, task23))); assertThat(bounce2.activeTasks(), equalTo(Utils.mkSet(task02, task03, task10))); }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
@Test public void shouldAssignTasksToNewClient() throws Exception { createClientWithPreviousActiveTasks(p1, 1, task01, task02); createClient(p2, 1); createTaskAssignor(task01, task02).assign(0); assertThat(clients.get(p1).activeTaskCount(), equalTo(1)); }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
@Test public void shouldAssignTasksToNewClientWithoutFlippingAssignmentBetweenExistingClients() throws Exception { final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task00, task01, task02); final ClientState c2 = createClientWithPreviousActiveTasks(p2, 1, task03, task04, task05); final ClientState newClient = createClient(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02, task03, task04, task05); taskAssignor.assign(0); assertThat(c1.activeTasks(), not(hasItem(task03))); assertThat(c1.activeTasks(), not(hasItem(task04))); assertThat(c1.activeTasks(), not(hasItem(task05))); assertThat(c1.activeTaskCount(), equalTo(2)); assertThat(c2.activeTasks(), not(hasItems(task00))); assertThat(c2.activeTasks(), not(hasItems(task01))); assertThat(c2.activeTasks(), not(hasItems(task02))); assertThat(c2.activeTaskCount(), equalTo(2)); assertThat(newClient.activeTaskCount(), equalTo(2)); }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
@Test public void shouldAssignTasksToNewClientWithoutFlippingAssignmentBetweenExistingAndBouncedClients() throws Exception { final TaskId task06 = new TaskId(0, 6); final ClientState c1 = createClientWithPreviousActiveTasks(p1, 1, task00, task01, task02, task06); final ClientState c2 = createClient(p2, 1); c2.addPreviousStandbyTasks(Utils.mkSet(task03, task04, task05)); final ClientState newClient = createClient(p3, 1); final StickyTaskAssignor<Integer> taskAssignor = createTaskAssignor(task00, task01, task02, task03, task04, task05, task06); taskAssignor.assign(0); assertThat(c1.activeTasks(), not(hasItem(task03))); assertThat(c1.activeTasks(), not(hasItem(task04))); assertThat(c1.activeTasks(), not(hasItem(task05))); assertThat(c1.activeTaskCount(), equalTo(3)); assertThat(c2.activeTasks(), not(hasItems(task00))); assertThat(c2.activeTasks(), not(hasItems(task01))); assertThat(c2.activeTasks(), not(hasItems(task02))); assertThat(c2.activeTaskCount(), equalTo(2)); assertThat(newClient.activeTaskCount(), equalTo(2)); }
|
@Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
StickyTaskAssignor implements TaskAssignor<ID, TaskId> { @Override public void assign(final int numStandbyReplicas) { assignActive(); assignStandby(numStandbyReplicas); } StickyTaskAssignor(final Map<ID, ClientState> clients, final Set<TaskId> taskIds); @Override void assign(final int numStandbyReplicas); }
|
// Backward-compatibility check for the subscription wire format: a buffer encoded
// with the previous (version 1) layout — which has no user endpoint field — must
// still decode correctly, yielding the original process id, prev/standby task sets,
// and a null userEndPoint.
@Test public void shouldBeBackwardCompatible() throws Exception { UUID processId = UUID.randomUUID(); Set<TaskId> activeTasks = new HashSet<>(Arrays.asList(new TaskId(0, 0), new TaskId(0, 1), new TaskId(1, 0))); Set<TaskId> standbyTasks = new HashSet<>(Arrays.asList(new TaskId(1, 1), new TaskId(2, 0))); final ByteBuffer v1Encoding = encodePreviousVersion(processId, activeTasks, standbyTasks); final SubscriptionInfo decode = SubscriptionInfo.decode(v1Encoding); assertEquals(activeTasks, decode.prevTasks); assertEquals(standbyTasks, decode.standbyTasks); assertEquals(processId, decode.processId); assertNull(decode.userEndPoint); }
|
// Decodes a SubscriptionInfo from its binary wire format. Layout (big-endian, as
// read sequentially from the rewound buffer):
//   int   version
//   long  msb, long lsb            -> process UUID
//   int   n, then n TaskIds        -> previously active tasks
//   int   m, then m TaskIds        -> standby tasks
//   [CURRENT_VERSION only] int len, then len UTF-8 bytes -> user endpoint
// Accepts CURRENT_VERSION and legacy version 1 (which has no endpoint field);
// a zero endpoint length decodes to a null userEndPoint. Any other version is
// logged and rejected with a TaskAssignmentException.
// NOTE(review): rewind() means the caller's buffer position is consumed from 0
// regardless of its prior state.
public static SubscriptionInfo decode(ByteBuffer data) { data.rewind(); int version = data.getInt(); if (version == CURRENT_VERSION || version == 1) { UUID processId = new UUID(data.getLong(), data.getLong()); Set<TaskId> prevTasks = new HashSet<>(); int numPrevs = data.getInt(); for (int i = 0; i < numPrevs; i++) { TaskId id = TaskId.readFrom(data); prevTasks.add(id); } Set<TaskId> standbyTasks = new HashSet<>(); int numCached = data.getInt(); for (int i = 0; i < numCached; i++) { standbyTasks.add(TaskId.readFrom(data)); } String userEndPoint = null; if (version == CURRENT_VERSION) { int bytesLength = data.getInt(); if (bytesLength != 0) { byte[] bytes = new byte[bytesLength]; data.get(bytes); userEndPoint = new String(bytes, Charset.forName("UTF-8")); } } return new SubscriptionInfo(version, processId, prevTasks, standbyTasks, userEndPoint); } else { TaskAssignmentException ex = new TaskAssignmentException("unable to decode subscription data: version=" + version); log.error(ex.getMessage(), ex); throw ex; } }
|
SubscriptionInfo { public static SubscriptionInfo decode(ByteBuffer data) { data.rewind(); int version = data.getInt(); if (version == CURRENT_VERSION || version == 1) { UUID processId = new UUID(data.getLong(), data.getLong()); Set<TaskId> prevTasks = new HashSet<>(); int numPrevs = data.getInt(); for (int i = 0; i < numPrevs; i++) { TaskId id = TaskId.readFrom(data); prevTasks.add(id); } Set<TaskId> standbyTasks = new HashSet<>(); int numCached = data.getInt(); for (int i = 0; i < numCached; i++) { standbyTasks.add(TaskId.readFrom(data)); } String userEndPoint = null; if (version == CURRENT_VERSION) { int bytesLength = data.getInt(); if (bytesLength != 0) { byte[] bytes = new byte[bytesLength]; data.get(bytes); userEndPoint = new String(bytes, Charset.forName("UTF-8")); } } return new SubscriptionInfo(version, processId, prevTasks, standbyTasks, userEndPoint); } else { TaskAssignmentException ex = new TaskAssignmentException("unable to decode subscription data: version=" + version); log.error(ex.getMessage(), ex); throw ex; } } }
|
SubscriptionInfo { public static SubscriptionInfo decode(ByteBuffer data) { data.rewind(); int version = data.getInt(); if (version == CURRENT_VERSION || version == 1) { UUID processId = new UUID(data.getLong(), data.getLong()); Set<TaskId> prevTasks = new HashSet<>(); int numPrevs = data.getInt(); for (int i = 0; i < numPrevs; i++) { TaskId id = TaskId.readFrom(data); prevTasks.add(id); } Set<TaskId> standbyTasks = new HashSet<>(); int numCached = data.getInt(); for (int i = 0; i < numCached; i++) { standbyTasks.add(TaskId.readFrom(data)); } String userEndPoint = null; if (version == CURRENT_VERSION) { int bytesLength = data.getInt(); if (bytesLength != 0) { byte[] bytes = new byte[bytesLength]; data.get(bytes); userEndPoint = new String(bytes, Charset.forName("UTF-8")); } } return new SubscriptionInfo(version, processId, prevTasks, standbyTasks, userEndPoint); } else { TaskAssignmentException ex = new TaskAssignmentException("unable to decode subscription data: version=" + version); log.error(ex.getMessage(), ex); throw ex; } } SubscriptionInfo(UUID processId, Set<TaskId> prevTasks, Set<TaskId> standbyTasks, String userEndPoint); private SubscriptionInfo(int version, UUID processId, Set<TaskId> prevTasks, Set<TaskId> standbyTasks, String userEndPoint); }
|
SubscriptionInfo { public static SubscriptionInfo decode(ByteBuffer data) { data.rewind(); int version = data.getInt(); if (version == CURRENT_VERSION || version == 1) { UUID processId = new UUID(data.getLong(), data.getLong()); Set<TaskId> prevTasks = new HashSet<>(); int numPrevs = data.getInt(); for (int i = 0; i < numPrevs; i++) { TaskId id = TaskId.readFrom(data); prevTasks.add(id); } Set<TaskId> standbyTasks = new HashSet<>(); int numCached = data.getInt(); for (int i = 0; i < numCached; i++) { standbyTasks.add(TaskId.readFrom(data)); } String userEndPoint = null; if (version == CURRENT_VERSION) { int bytesLength = data.getInt(); if (bytesLength != 0) { byte[] bytes = new byte[bytesLength]; data.get(bytes); userEndPoint = new String(bytes, Charset.forName("UTF-8")); } } return new SubscriptionInfo(version, processId, prevTasks, standbyTasks, userEndPoint); } else { TaskAssignmentException ex = new TaskAssignmentException("unable to decode subscription data: version=" + version); log.error(ex.getMessage(), ex); throw ex; } } SubscriptionInfo(UUID processId, Set<TaskId> prevTasks, Set<TaskId> standbyTasks, String userEndPoint); private SubscriptionInfo(int version, UUID processId, Set<TaskId> prevTasks, Set<TaskId> standbyTasks, String userEndPoint); ByteBuffer encode(); static SubscriptionInfo decode(ByteBuffer data); @Override int hashCode(); @Override boolean equals(Object o); }
|
SubscriptionInfo { public static SubscriptionInfo decode(ByteBuffer data) { data.rewind(); int version = data.getInt(); if (version == CURRENT_VERSION || version == 1) { UUID processId = new UUID(data.getLong(), data.getLong()); Set<TaskId> prevTasks = new HashSet<>(); int numPrevs = data.getInt(); for (int i = 0; i < numPrevs; i++) { TaskId id = TaskId.readFrom(data); prevTasks.add(id); } Set<TaskId> standbyTasks = new HashSet<>(); int numCached = data.getInt(); for (int i = 0; i < numCached; i++) { standbyTasks.add(TaskId.readFrom(data)); } String userEndPoint = null; if (version == CURRENT_VERSION) { int bytesLength = data.getInt(); if (bytesLength != 0) { byte[] bytes = new byte[bytesLength]; data.get(bytes); userEndPoint = new String(bytes, Charset.forName("UTF-8")); } } return new SubscriptionInfo(version, processId, prevTasks, standbyTasks, userEndPoint); } else { TaskAssignmentException ex = new TaskAssignmentException("unable to decode subscription data: version=" + version); log.error(ex.getMessage(), ex); throw ex; } } SubscriptionInfo(UUID processId, Set<TaskId> prevTasks, Set<TaskId> standbyTasks, String userEndPoint); private SubscriptionInfo(int version, UUID processId, Set<TaskId> prevTasks, Set<TaskId> standbyTasks, String userEndPoint); ByteBuffer encode(); static SubscriptionInfo decode(ByteBuffer data); @Override int hashCode(); @Override boolean equals(Object o); final int version; final UUID processId; final Set<TaskId> prevTasks; final Set<TaskId> standbyTasks; final String userEndPoint; }
|
// REST-layer error propagation: when the herder's connectorConfig callback completes
// exceptionally with NotFoundException, the resource method must rethrow it (the
// expected = NotFoundException.class annotation asserts the propagation).
@Test(expected = NotFoundException.class) public void testGetConnectorConfigConnectorNotFound() throws Throwable { final Capture<Callback<Map<String, String>>> cb = Capture.newInstance(); herder.connectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackException(cb, new NotFoundException("not found")); PowerMock.replayAll(); connectorsResource.getConnectorConfig(CONNECTOR_NAME, FORWARD); PowerMock.verifyAll(); }
|
/**
 * REST endpoint returning the raw configuration of a single connector.
 * Delegates to the herder asynchronously and either waits for the result
 * locally or forwards the request to the leader, per the {@code forward} flag.
 *
 * @param connector connector name from the URL path
 * @param forward   whether the request may be forwarded to the leader (nullable)
 * @return the connector's configuration map
 * @throws Throwable whatever the herder callback completes exceptionally with
 */
@GET
@Path("/{connector}/config")
public Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable {
    final String forwardPath = "/connectors/" + connector + "/config";
    final FutureCallback<Map<String, String>> callback = new FutureCallback<>();
    herder.connectorConfig(connector, callback);
    return completeOrForwardRequest(callback, forwardPath, "GET", null, forward);
}
|
ConnectorsResource { @GET @Path("/{connector}/config") public Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Map<String, String>> cb = new FutureCallback<>(); herder.connectorConfig(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "GET", null, forward); } }
|
ConnectorsResource { @GET @Path("/{connector}/config") public Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Map<String, String>> cb = new FutureCallback<>(); herder.connectorConfig(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "GET", null, forward); } ConnectorsResource(Herder herder); }
|
ConnectorsResource { @GET @Path("/{connector}/config") public Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Map<String, String>> cb = new FutureCallback<>(); herder.connectorConfig(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "GET", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }
|
ConnectorsResource { @GET @Path("/{connector}/config") public Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<Map<String, String>> cb = new FutureCallback<>(); herder.connectorConfig(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "GET", null, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }
|
// A client with fewer assigned tasks than capacity (here: zero tasks) must report
// that it has not reached capacity.
@Test public void shouldHaveNotReachedCapacityWhenAssignedTasksLessThanCapacity() throws Exception { assertFalse(client.reachedCapacity()); }
|
/**
 * Whether this client is already holding as many tasks as its capacity allows.
 *
 * @return true when the assigned-task count is at or above {@code capacity}
 */
boolean reachedCapacity() {
    // Same predicate as assignedTasks.size() >= capacity, phrased from the capacity side.
    return capacity <= assignedTasks.size();
}
|
ClientState { boolean reachedCapacity() { return assignedTasks.size() >= capacity; } }
|
ClientState { boolean reachedCapacity() { return assignedTasks.size() >= capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); }
|
ClientState { boolean reachedCapacity() { return assignedTasks.size() >= capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString(); }
|
ClientState { boolean reachedCapacity() { return assignedTasks.size() >= capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString(); }
|
// Tie-break on equal load: with the same assigned-task count, the client with the
// larger capacity (2 vs 1) must report more available capacity, and the comparison
// must be antisymmetric (the smaller client reports false).
@Test public void shouldHaveMoreAvailableCapacityWhenCapacityHigherAndSameAssignedTaskCount() throws Exception { final ClientState c2 = new ClientState(2); assertTrue(c2.hasMoreAvailableCapacityThan(client)); assertFalse(client.hasMoreAvailableCapacityThan(c2)); }
|
/**
 * Compares relative load (assigned tasks divided by capacity) between this client
 * and {@code other}. Lower load means more available capacity; on an exact load
 * tie the client with the larger absolute capacity wins.
 *
 * @param other the client to compare against
 * @return true if this client has strictly more available capacity
 * @throws IllegalStateException if either client's capacity is not positive
 */
boolean hasMoreAvailableCapacityThan(final ClientState other) {
    // Guard both operands: load is undefined for non-positive capacity.
    if (this.capacity <= 0) {
        throw new IllegalStateException("Capacity of this ClientState must be greater than 0.");
    }
    if (other.capacity <= 0) {
        throw new IllegalStateException("Capacity of other ClientState must be greater than 0");
    }
    final double thisLoad = (double) assignedTaskCount() / capacity;
    final double otherLoad = (double) other.assignedTaskCount() / other.capacity;
    final int loadComparison = Double.compare(thisLoad, otherLoad);
    if (loadComparison != 0) {
        return loadComparison < 0; // strictly lower load => more headroom
    }
    return capacity > other.capacity; // equal load: prefer the bigger client
}
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString(); }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString(); }
|
// Precondition check: comparing from a client whose own capacity is zero must fail
// fast with IllegalStateException (load would be a division by zero otherwise).
@Test(expected = IllegalStateException.class) public void shouldThrowIllegalStateExceptionIfCapacityOfThisClientStateIsZero() throws Exception { final ClientState c1 = new ClientState(0); c1.hasMoreAvailableCapacityThan(new ClientState(1)); }
|
// Load comparison: a client has more available capacity when its ratio of assigned
// tasks to capacity is strictly lower; exact ties are broken in favor of the larger
// capacity. Both capacities must be positive or IllegalStateException is thrown.
boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString(); }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString(); }
|
// Symmetric precondition check: the comparison must also reject a zero-capacity
// argument ("other" side), not only a zero-capacity receiver.
@Test(expected = IllegalStateException.class) public void shouldThrowIllegalStateExceptionIfCapacityOfOtherClientStateIsZero() throws Exception { final ClientState c1 = new ClientState(1); c1.hasMoreAvailableCapacityThan(new ClientState(0)); }
|
// Load comparison: lower assigned/capacity ratio wins; on a tie the larger capacity
// wins. Rejects non-positive capacity on either side with IllegalStateException.
boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString(); }
|
ClientState { boolean hasMoreAvailableCapacityThan(final ClientState other) { if (this.capacity <= 0) { throw new IllegalStateException("Capacity of this ClientState must be greater than 0."); } if (other.capacity <= 0) { throw new IllegalStateException("Capacity of other ClientState must be greater than 0"); } final double otherLoad = (double) other.assignedTaskCount() / other.capacity; final double thisLoad = (double) assignedTaskCount() / capacity; if (thisLoad < otherLoad) return true; else if (thisLoad > otherLoad) return false; else return capacity > other.capacity; } ClientState(); ClientState(final int capacity); private ClientState(Set<TaskId> activeTasks, Set<TaskId> standbyTasks, Set<TaskId> assignedTasks, Set<TaskId> prevActiveTasks, Set<TaskId> prevAssignedTasks, int capacity); ClientState copy(); void assign(final TaskId taskId, final boolean active); Set<TaskId> activeTasks(); Set<TaskId> standbyTasks(); int assignedTaskCount(); void incrementCapacity(); int activeTaskCount(); void addPreviousActiveTasks(final Set<TaskId> prevTasks); void addPreviousStandbyTasks(final Set<TaskId> standbyTasks); @Override String toString(); }
|
// Union-find behavior: each unite() call must reduce the number of distinct roots by
// one (5 -> 4 -> 3 -> 2 -> 1) and make every united id resolve to the same root.
// The final state asserts full transitive connectivity across all five ids.
@SuppressWarnings("unchecked") @Test public void testUnite() { QuickUnion<Long> qu = new QuickUnion<>(); long[] ids = { 1L, 2L, 3L, 4L, 5L }; for (long id : ids) { qu.add(id); } assertEquals(5, roots(qu, ids).size()); qu.unite(1L, 2L); assertEquals(4, roots(qu, ids).size()); assertEquals(qu.root(1L), qu.root(2L)); qu.unite(3L, 4L); assertEquals(3, roots(qu, ids).size()); assertEquals(qu.root(1L), qu.root(2L)); assertEquals(qu.root(3L), qu.root(4L)); qu.unite(1L, 5L); assertEquals(2, roots(qu, ids).size()); assertEquals(qu.root(1L), qu.root(2L)); assertEquals(qu.root(2L), qu.root(5L)); assertEquals(qu.root(3L), qu.root(4L)); qu.unite(3L, 5L); assertEquals(1, roots(qu, ids).size()); assertEquals(qu.root(1L), qu.root(2L)); assertEquals(qu.root(2L), qu.root(3L)); assertEquals(qu.root(3L), qu.root(4L)); assertEquals(qu.root(4L), qu.root(5L)); }
|
/**
 * Unions {@code id1} with every id in {@code idList}, one pair at a time,
 * so that afterwards all of them share a single root.
 *
 * @param id1    the anchor element
 * @param idList the elements to merge into id1's component
 */
public void unite(T id1, T... idList) {
    for (final T member : idList) {
        unitePair(id1, member);
    }
}
|
QuickUnion { public void unite(T id1, T... idList) { for (T id2 : idList) { unitePair(id1, id2); } } }
|
QuickUnion { public void unite(T id1, T... idList) { for (T id2 : idList) { unitePair(id1, id2); } } }
|
QuickUnion { public void unite(T id1, T... idList) { for (T id2 : idList) { unitePair(id1, id2); } } void add(T id); boolean exists(T id); T root(T id); void unite(T id1, T... idList); }
|
QuickUnion { public void unite(T id1, T... idList) { for (T id2 : idList) { unitePair(id1, id2); } } void add(T id); boolean exists(T id); T root(T id); void unite(T id1, T... idList); }
|
// With a consumer whose listTopics() always times out and a validation timeout of 0,
// validatePartitionExists can never learn about the partition, so it must surface a
// StreamsException (the catch block is the assertion; reaching fail() means no throw).
// NOTE(review): with timeout 0 the per-partition retry loop is skipped entirely, so
// the exception comes from the final "does not contain partition" check rather than
// the fetch path — confirm if the test intends to cover the fetch path too.
@SuppressWarnings("unchecked") @Test public void shouldThrowStreamsExceptionWhenTimeoutExceptionThrown() throws Exception { final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { @Override public Map<String, List<PartitionInfo>> listTopics() { throw new TimeoutException("KABOOM!"); } }; final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 0); try { changelogReader.validatePartitionExists(topicPartition, "store"); fail("Should have thrown streams exception"); } catch (final StreamsException e) { } }
|
@Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
@Test(expected = StreamsException.class) public void shouldThrowStreamsExceptionIfPartitionDoesntExistAfterMaxWait() throws Exception { changelogReader.validatePartitionExists(topicPartition, "store"); }
|
@Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
@SuppressWarnings("unchecked") @Test public void shouldFallbackToPartitionsForIfPartitionNotInAllPartitionsList() throws Exception { final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { @Override public List<PartitionInfo> partitionsFor(final String topic) { return Collections.singletonList(partitionInfo); } }; final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 10); changelogReader.validatePartitionExists(topicPartition, "store"); }
|
@Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
@SuppressWarnings("unchecked") @Test public void shouldThrowStreamsExceptionIfTimeoutOccursDuringPartitionsFor() throws Exception { final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) { @Override public List<PartitionInfo> partitionsFor(final String topic) { throw new TimeoutException("KABOOM!"); } }; final StoreChangelogReader changelogReader = new StoreChangelogReader(consumer, new MockTime(), 5); try { changelogReader.validatePartitionExists(topicPartition, "store"); fail("Should have thrown streams exception"); } catch (final StreamsException e) { } }
|
@Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
StoreChangelogReader implements ChangelogReader { @Override public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) { final long start = time.milliseconds(); if (partitionInfo.isEmpty()) { try { partitionInfo.putAll(consumer.listTopics()); } catch (final TimeoutException e) { log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix); } } final long endTime = time.milliseconds() + partitionValidationTimeoutMs; while (!hasPartition(topicPartition) && time.milliseconds() < endTime) { try { final List<PartitionInfo> partitions = consumer.partitionsFor(topicPartition.topic()); if (partitions != null) { partitionInfo.put(topicPartition.topic(), partitions); } } catch (final TimeoutException e) { throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic())); } } if (!hasPartition(topicPartition)) { throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition())); } log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - start, topicPartition); } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
@Test
public void shouldPassIfTopicPartitionExists() throws Exception {
    // Seed the mock consumer with metadata so validation succeeds without throwing.
    consumer.updatePartitions(topicPartition.topic(), Collections.singletonList(partitionInfo));
    changelogReader.validatePartitionExists(topicPartition, "store");
}
|
@Override
public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
    // Track elapsed time purely for the trailing debug log.
    final long startMs = time.milliseconds();

    // Fast path: one bulk listTopics() call populates metadata for every topic.
    if (partitionInfo.isEmpty()) {
        try {
            partitionInfo.putAll(consumer.listTopics());
        } catch (final TimeoutException e) {
            log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
        }
    }

    // Fallback: repeatedly fetch metadata for just this topic until the partition
    // shows up or the validation deadline passes.
    final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
    while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
        try {
            final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
            if (fetched != null) {
                partitionInfo.put(topicPartition.topic(), fetched);
            }
        } catch (final TimeoutException e) {
            throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
        }
    }

    if (!hasPartition(topicPartition)) {
        throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
    }

    log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
}
|
// NOTE(review): dataset-style row — class wrapper around the focal method.
StoreChangelogReader implements ChangelogReader {

    @Override
    public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
        // Remember when validation began so the duration can be logged at the end.
        final long startMs = time.milliseconds();

        // Try to load partition metadata for all topics in a single round trip.
        if (partitionInfo.isEmpty()) {
            try {
                partitionInfo.putAll(consumer.listTopics());
            } catch (final TimeoutException e) {
                log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
            }
        }

        // Per-topic retry loop, bounded by partitionValidationTimeoutMs.
        final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
        while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
            try {
                final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
                if (fetched != null) {
                    partitionInfo.put(topicPartition.topic(), fetched);
                }
            } catch (final TimeoutException e) {
                throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
            }
        }

        if (!hasPartition(topicPartition)) {
            throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
        }

        log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
    }
}
|
// NOTE(review): dataset-style row — class body plus bare constructor signatures; not compilable as-is.
StoreChangelogReader implements ChangelogReader {

    @Override
    public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
        // Track elapsed time purely for the trailing debug log.
        final long startMs = time.milliseconds();

        // Fast path: one bulk listTopics() call populates metadata for every topic.
        if (partitionInfo.isEmpty()) {
            try {
                partitionInfo.putAll(consumer.listTopics());
            } catch (final TimeoutException e) {
                log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
            }
        }

        // Fallback: per-topic metadata fetch until found or the deadline passes.
        final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
        while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
            try {
                final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
                if (fetched != null) {
                    partitionInfo.put(topicPartition.topic(), fetched);
                }
            } catch (final TimeoutException e) {
                throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
            }
        }

        if (!hasPartition(topicPartition)) {
            throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
        }

        log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
    }

    StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
}
|
// NOTE(review): dataset-style row — class body plus bare member signatures; not compilable as-is.
StoreChangelogReader implements ChangelogReader {

    @Override
    public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
        // Remember when validation began so the duration can be logged at the end.
        final long startMs = time.milliseconds();

        // Try to load partition metadata for all topics in a single round trip.
        if (partitionInfo.isEmpty()) {
            try {
                partitionInfo.putAll(consumer.listTopics());
            } catch (final TimeoutException e) {
                log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
            }
        }

        // Per-topic retry loop, bounded by partitionValidationTimeoutMs.
        final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
        while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
            try {
                final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
                if (fetched != null) {
                    partitionInfo.put(topicPartition.topic(), fetched);
                }
            } catch (final TimeoutException e) {
                throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
            }
        }

        if (!hasPartition(topicPartition)) {
            throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
        }

        log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
    }

    StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName);
    @Override void register(final StateRestorer restorer);
    void restore();
    @Override Map<TopicPartition, Long> restoredOffsets();
}
|
// NOTE(review): dataset-style row — class body plus bare member signatures; not compilable as-is.
StoreChangelogReader implements ChangelogReader {

    @Override
    public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
        // Track elapsed time purely for the trailing debug log.
        final long startMs = time.milliseconds();

        // Fast path: one bulk listTopics() call populates metadata for every topic.
        if (partitionInfo.isEmpty()) {
            try {
                partitionInfo.putAll(consumer.listTopics());
            } catch (final TimeoutException e) {
                log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
            }
        }

        // Fallback: per-topic metadata fetch until found or the deadline passes.
        final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
        while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
            try {
                final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
                if (fetched != null) {
                    partitionInfo.put(topicPartition.topic(), fetched);
                }
            } catch (final TimeoutException e) {
                throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
            }
        }

        if (!hasPartition(topicPartition)) {
            throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
        }

        log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
    }

    StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName);
    @Override void register(final StateRestorer restorer);
    void restore();
    @Override Map<TopicPartition, Long> restoredOffsets();
}
|
@Test
public void testPutConnectorConfig() throws Throwable {
    // Expect the herder call and capture its callback so the mock can complete it.
    final Capture<Callback<Herder.Created<ConnectorInfo>>> createdCallback = Capture.newInstance();
    herder.putConnectorConfig(EasyMock.eq(CONNECTOR_NAME), EasyMock.eq(CONNECTOR_CONFIG), EasyMock.eq(true), EasyMock.capture(createdCallback));
    expectAndCallbackResult(createdCallback, new Herder.Created<>(false, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, CONNECTOR_TASK_NAMES)));
    PowerMock.replayAll();

    connectorsResource.putConnectorConfig(CONNECTOR_NAME, FORWARD, CONNECTOR_CONFIG);

    PowerMock.verifyAll();
}
|
@PUT
@Path("/{connector}/config")
public Response putConnectorConfig(final @PathParam("connector") String connector,
                                   final @QueryParam("forward") Boolean forward,
                                   final Map<String, String> connectorConfig) throws Throwable {
    final FutureCallback<Herder.Created<ConnectorInfo>> callback = new FutureCallback<>();

    // The body's "name" field must match the connector addressed by the URL;
    // when absent it is filled in from the URL.
    final String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG);
    if (includedName == null) {
        connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector);
    } else if (!includedName.equals(connector)) {
        throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")");
    }

    herder.putConnectorConfig(connector, connectorConfig, true, callback);
    // Complete locally or forward the request, translating the herder's result.
    final Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(
            callback, "/connectors/" + connector + "/config", "PUT", connectorConfig,
            new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);

    // 201 Created when the connector is new, 200 OK when it was updated.
    final Response.ResponseBuilder response = createdInfo.created()
            ? Response.created(URI.create("/connectors/" + connector))
            : Response.ok();
    return response.entity(createdInfo.result()).build();
}
|
// NOTE(review): dataset-style row — class wrapper around the focal method.
ConnectorsResource {

    @PUT
    @Path("/{connector}/config")
    public Response putConnectorConfig(final @PathParam("connector") String connector,
                                       final @QueryParam("forward") Boolean forward,
                                       final Map<String, String> connectorConfig) throws Throwable {
        final FutureCallback<Herder.Created<ConnectorInfo>> callback = new FutureCallback<>();

        // The body's "name" field must match the connector addressed by the URL;
        // when absent it is filled in from the URL.
        final String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG);
        if (includedName == null) {
            connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector);
        } else if (!includedName.equals(connector)) {
            throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")");
        }

        herder.putConnectorConfig(connector, connectorConfig, true, callback);
        // Complete locally or forward the request, translating the herder's result.
        final Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(
                callback, "/connectors/" + connector + "/config", "PUT", connectorConfig,
                new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);

        // 201 Created when the connector is new, 200 OK when it was updated.
        final Response.ResponseBuilder response = createdInfo.created()
                ? Response.created(URI.create("/connectors/" + connector))
                : Response.ok();
        return response.entity(createdInfo.result()).build();
    }
}
|
// NOTE(review): dataset-style row — class body plus bare constructor signature; not compilable as-is.
ConnectorsResource {

    @PUT
    @Path("/{connector}/config")
    public Response putConnectorConfig(final @PathParam("connector") String connector,
                                       final @QueryParam("forward") Boolean forward,
                                       final Map<String, String> connectorConfig) throws Throwable {
        final FutureCallback<Herder.Created<ConnectorInfo>> callback = new FutureCallback<>();

        // Reject a body whose "name" disagrees with the URL; fill it in when missing.
        final String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG);
        if (includedName == null) {
            connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector);
        } else if (!includedName.equals(connector)) {
            throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")");
        }

        herder.putConnectorConfig(connector, connectorConfig, true, callback);
        final Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(
                callback, "/connectors/" + connector + "/config", "PUT", connectorConfig,
                new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);

        // 201 Created for a brand-new connector, 200 OK for an update.
        final Response.ResponseBuilder response = createdInfo.created()
                ? Response.created(URI.create("/connectors/" + connector))
                : Response.ok();
        return response.entity(createdInfo.result()).build();
    }

    ConnectorsResource(Herder herder);
}
|
// NOTE(review): dataset-style row — class body plus bare REST-endpoint signatures; not compilable as-is.
ConnectorsResource {

    @PUT
    @Path("/{connector}/config")
    public Response putConnectorConfig(final @PathParam("connector") String connector,
                                       final @QueryParam("forward") Boolean forward,
                                       final Map<String, String> connectorConfig) throws Throwable {
        final FutureCallback<Herder.Created<ConnectorInfo>> callback = new FutureCallback<>();

        // The body's "name" field must match the connector addressed by the URL;
        // when absent it is filled in from the URL.
        final String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG);
        if (includedName == null) {
            connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector);
        } else if (!includedName.equals(connector)) {
            throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")");
        }

        herder.putConnectorConfig(connector, connectorConfig, true, callback);
        // Complete locally or forward the request, translating the herder's result.
        final Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(
                callback, "/connectors/" + connector + "/config", "PUT", connectorConfig,
                new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);

        // 201 Created when the connector is new, 200 OK when it was updated.
        final Response.ResponseBuilder response = createdInfo.created()
                ? Response.created(URI.create("/connectors/" + connector))
                : Response.ok();
        return response.entity(createdInfo.result()).build();
    }

    ConnectorsResource(Herder herder);
    @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward);
    @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest);
    @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
    @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
    @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector);
    @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig);
    @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
    @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector);
    @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector);
    @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
    @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs);
    @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task);
    @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward);
    @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
}
|
// NOTE(review): dataset-style row — class body plus bare REST-endpoint signatures; not compilable as-is.
ConnectorsResource {

    @PUT
    @Path("/{connector}/config")
    public Response putConnectorConfig(final @PathParam("connector") String connector,
                                       final @QueryParam("forward") Boolean forward,
                                       final Map<String, String> connectorConfig) throws Throwable {
        final FutureCallback<Herder.Created<ConnectorInfo>> callback = new FutureCallback<>();

        // Reject a body whose "name" disagrees with the URL; fill it in when missing.
        final String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG);
        if (includedName == null) {
            connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector);
        } else if (!includedName.equals(connector)) {
            throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")");
        }

        herder.putConnectorConfig(connector, connectorConfig, true, callback);
        final Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(
                callback, "/connectors/" + connector + "/config", "PUT", connectorConfig,
                new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward);

        // 201 Created for a brand-new connector, 200 OK for an update.
        final Response.ResponseBuilder response = createdInfo.created()
                ? Response.created(URI.create("/connectors/" + connector))
                : Response.ok();
        return response.entity(createdInfo.result()).build();
    }

    ConnectorsResource(Herder herder);
    @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward);
    @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward, final CreateConnectorRequest createRequest);
    @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
    @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
    @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector);
    @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig);
    @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
    @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector);
    @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector);
    @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
    @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final List<Map<String, String>> taskConfigs);
    @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector, final @PathParam("task") Integer task);
    @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector, final @PathParam("task") Integer task, final @QueryParam("forward") Boolean forward);
    @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward);
}
|
@SuppressWarnings("unchecked")
@Test
public void shouldRequestPartitionInfoIfItDoesntExist() throws Exception {
    // listTopics() returns nothing, forcing the reader onto the per-topic partitionsFor() fallback.
    final MockConsumer<byte[], byte[]> consumer = new MockConsumer(OffsetResetStrategy.EARLIEST) {
        @Override
        public Map<String, List<PartitionInfo>> listTopics() {
            return Collections.emptyMap();
        }
    };
    consumer.updatePartitions(topicPartition.topic(), Collections.singletonList(partitionInfo));
    final StoreChangelogReader reader = new StoreChangelogReader(consumer, Time.SYSTEM, 5000);
    reader.validatePartitionExists(topicPartition, "store");
}
|
@Override
public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
    // Remember when validation began so the duration can be logged at the end.
    final long startMs = time.milliseconds();

    // Try to load partition metadata for all topics in a single round trip.
    if (partitionInfo.isEmpty()) {
        try {
            partitionInfo.putAll(consumer.listTopics());
        } catch (final TimeoutException e) {
            log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
        }
    }

    // Per-topic retry loop, bounded by partitionValidationTimeoutMs.
    final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
    while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
        try {
            final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
            if (fetched != null) {
                partitionInfo.put(topicPartition.topic(), fetched);
            }
        } catch (final TimeoutException e) {
            throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
        }
    }

    if (!hasPartition(topicPartition)) {
        throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
    }

    log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
}
|
// NOTE(review): dataset-style row — class wrapper around the focal method.
StoreChangelogReader implements ChangelogReader {

    @Override
    public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
        // Track elapsed time purely for the trailing debug log.
        final long startMs = time.milliseconds();

        // Fast path: one bulk listTopics() call populates metadata for every topic.
        if (partitionInfo.isEmpty()) {
            try {
                partitionInfo.putAll(consumer.listTopics());
            } catch (final TimeoutException e) {
                log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
            }
        }

        // Fallback: per-topic metadata fetch until found or the deadline passes.
        final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
        while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
            try {
                final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
                if (fetched != null) {
                    partitionInfo.put(topicPartition.topic(), fetched);
                }
            } catch (final TimeoutException e) {
                throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
            }
        }

        if (!hasPartition(topicPartition)) {
            throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
        }

        log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
    }
}
|
// NOTE(review): dataset-style row — class body plus bare constructor signatures; not compilable as-is.
StoreChangelogReader implements ChangelogReader {

    @Override
    public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
        // Remember when validation began so the duration can be logged at the end.
        final long startMs = time.milliseconds();

        // Try to load partition metadata for all topics in a single round trip.
        if (partitionInfo.isEmpty()) {
            try {
                partitionInfo.putAll(consumer.listTopics());
            } catch (final TimeoutException e) {
                log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
            }
        }

        // Per-topic retry loop, bounded by partitionValidationTimeoutMs.
        final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
        while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
            try {
                final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
                if (fetched != null) {
                    partitionInfo.put(topicPartition.topic(), fetched);
                }
            } catch (final TimeoutException e) {
                throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
            }
        }

        if (!hasPartition(topicPartition)) {
            throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
        }

        log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
    }

    StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
}
|
// NOTE(review): dataset-style row — class body plus bare member signatures; not compilable as-is.
StoreChangelogReader implements ChangelogReader {

    @Override
    public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
        // Track elapsed time purely for the trailing debug log.
        final long startMs = time.milliseconds();

        // Fast path: one bulk listTopics() call populates metadata for every topic.
        if (partitionInfo.isEmpty()) {
            try {
                partitionInfo.putAll(consumer.listTopics());
            } catch (final TimeoutException e) {
                log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
            }
        }

        // Fallback: per-topic metadata fetch until found or the deadline passes.
        final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
        while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
            try {
                final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
                if (fetched != null) {
                    partitionInfo.put(topicPartition.topic(), fetched);
                }
            } catch (final TimeoutException e) {
                throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
            }
        }

        if (!hasPartition(topicPartition)) {
            throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
        }

        log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
    }

    StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName);
    @Override void register(final StateRestorer restorer);
    void restore();
    @Override Map<TopicPartition, Long> restoredOffsets();
}
|
// NOTE(review): dataset-style row — class body plus bare member signatures; not compilable as-is.
StoreChangelogReader implements ChangelogReader {

    @Override
    public void validatePartitionExists(final TopicPartition topicPartition, final String storeName) {
        // Remember when validation began so the duration can be logged at the end.
        final long startMs = time.milliseconds();

        // Try to load partition metadata for all topics in a single round trip.
        if (partitionInfo.isEmpty()) {
            try {
                partitionInfo.putAll(consumer.listTopics());
            } catch (final TimeoutException e) {
                log.warn("{} Could not list topics so will fall back to partition by partition fetching", logPrefix);
            }
        }

        // Per-topic retry loop, bounded by partitionValidationTimeoutMs.
        final long deadline = time.milliseconds() + partitionValidationTimeoutMs;
        while (!hasPartition(topicPartition) && time.milliseconds() < deadline) {
            try {
                final List<PartitionInfo> fetched = consumer.partitionsFor(topicPartition.topic());
                if (fetched != null) {
                    partitionInfo.put(topicPartition.topic(), fetched);
                }
            } catch (final TimeoutException e) {
                throw new StreamsException(String.format("Could not fetch partition info for topic: %s before expiration of the configured request timeout", topicPartition.topic()));
            }
        }

        if (!hasPartition(topicPartition)) {
            throw new StreamsException(String.format("Store %s's change log (%s) does not contain partition %s", storeName, topicPartition.topic(), topicPartition.partition()));
        }

        log.debug("{} Took {} ms to validate that partition {} exists", logPrefix, time.milliseconds() - startMs, topicPartition);
    }

    StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs);
    @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName);
    @Override void register(final StateRestorer restorer);
    void restore();
    @Override Map<TopicPartition, Long> restoredOffsets();
}
|
@Test
public void shouldThrowExceptionIfConsumerHasCurrentSubscription() throws Exception {
    // A pre-existing subscription must make restore() refuse to run.
    consumer.subscribe(Collections.singleton("sometopic"));
    try {
        changelogReader.restore();
        fail("Should have thrown IllegalStateException");
    } catch (final IllegalStateException e) {
        // expected
    }
}
|
public void restore() { final long start = time.milliseconds(); try { if (!consumer.subscription().isEmpty()) { throw new IllegalStateException(String.format("Restore consumer should have not subscribed to any partitions (%s) beforehand", consumer.subscription())); } final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(stateRestorers.keySet()); final Map<TopicPartition, StateRestorer> needsRestoring = new HashMap<>(); for (final Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); Long offset = entry.getValue(); final StateRestorer restorer = stateRestorers.get(topicPartition); if (restorer.checkpoint() >= offset) { restorer.setRestoredOffset(restorer.checkpoint()); } else { needsRestoring.put(topicPartition, restorer); } } log.info("{} Starting restoring state stores from changelog topics {}", logPrefix, needsRestoring.keySet()); consumer.assign(needsRestoring.keySet()); final List<StateRestorer> needsPositionUpdate = new ArrayList<>(); for (final StateRestorer restorer : needsRestoring.values()) { if (restorer.checkpoint() != StateRestorer.NO_CHECKPOINT) { consumer.seek(restorer.partition(), restorer.checkpoint()); logRestoreOffsets(restorer.partition(), restorer.checkpoint(), endOffsets.get(restorer.partition())); restorer.setStartingOffset(consumer.position(restorer.partition())); } else { consumer.seekToBeginning(Collections.singletonList(restorer.partition())); needsPositionUpdate.add(restorer); } } for (final StateRestorer restorer : needsPositionUpdate) { final long position = consumer.position(restorer.partition()); restorer.setStartingOffset(position); logRestoreOffsets(restorer.partition(), position, endOffsets.get(restorer.partition())); } final Set<TopicPartition> partitions = new HashSet<>(needsRestoring.keySet()); while (!partitions.isEmpty()) { final ConsumerRecords<byte[], byte[]> allRecords = consumer.poll(10); final Iterator<TopicPartition> partitionIterator = 
partitions.iterator(); while (partitionIterator.hasNext()) { restorePartition(endOffsets, allRecords, partitionIterator); } } } finally { consumer.assign(Collections.<TopicPartition>emptyList()); log.debug("{} Took {} ms to restore all active states", logPrefix, time.milliseconds() - start); } }
|
StoreChangelogReader implements ChangelogReader { public void restore() { final long start = time.milliseconds(); try { if (!consumer.subscription().isEmpty()) { throw new IllegalStateException(String.format("Restore consumer should have not subscribed to any partitions (%s) beforehand", consumer.subscription())); } final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(stateRestorers.keySet()); final Map<TopicPartition, StateRestorer> needsRestoring = new HashMap<>(); for (final Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); Long offset = entry.getValue(); final StateRestorer restorer = stateRestorers.get(topicPartition); if (restorer.checkpoint() >= offset) { restorer.setRestoredOffset(restorer.checkpoint()); } else { needsRestoring.put(topicPartition, restorer); } } log.info("{} Starting restoring state stores from changelog topics {}", logPrefix, needsRestoring.keySet()); consumer.assign(needsRestoring.keySet()); final List<StateRestorer> needsPositionUpdate = new ArrayList<>(); for (final StateRestorer restorer : needsRestoring.values()) { if (restorer.checkpoint() != StateRestorer.NO_CHECKPOINT) { consumer.seek(restorer.partition(), restorer.checkpoint()); logRestoreOffsets(restorer.partition(), restorer.checkpoint(), endOffsets.get(restorer.partition())); restorer.setStartingOffset(consumer.position(restorer.partition())); } else { consumer.seekToBeginning(Collections.singletonList(restorer.partition())); needsPositionUpdate.add(restorer); } } for (final StateRestorer restorer : needsPositionUpdate) { final long position = consumer.position(restorer.partition()); restorer.setStartingOffset(position); logRestoreOffsets(restorer.partition(), position, endOffsets.get(restorer.partition())); } final Set<TopicPartition> partitions = new HashSet<>(needsRestoring.keySet()); while (!partitions.isEmpty()) { final ConsumerRecords<byte[], byte[]> allRecords = consumer.poll(10); final 
Iterator<TopicPartition> partitionIterator = partitions.iterator(); while (partitionIterator.hasNext()) { restorePartition(endOffsets, allRecords, partitionIterator); } } } finally { consumer.assign(Collections.<TopicPartition>emptyList()); log.debug("{} Took {} ms to restore all active states", logPrefix, time.milliseconds() - start); } } }
|
StoreChangelogReader implements ChangelogReader { public void restore() { final long start = time.milliseconds(); try { if (!consumer.subscription().isEmpty()) { throw new IllegalStateException(String.format("Restore consumer should have not subscribed to any partitions (%s) beforehand", consumer.subscription())); } final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(stateRestorers.keySet()); final Map<TopicPartition, StateRestorer> needsRestoring = new HashMap<>(); for (final Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); Long offset = entry.getValue(); final StateRestorer restorer = stateRestorers.get(topicPartition); if (restorer.checkpoint() >= offset) { restorer.setRestoredOffset(restorer.checkpoint()); } else { needsRestoring.put(topicPartition, restorer); } } log.info("{} Starting restoring state stores from changelog topics {}", logPrefix, needsRestoring.keySet()); consumer.assign(needsRestoring.keySet()); final List<StateRestorer> needsPositionUpdate = new ArrayList<>(); for (final StateRestorer restorer : needsRestoring.values()) { if (restorer.checkpoint() != StateRestorer.NO_CHECKPOINT) { consumer.seek(restorer.partition(), restorer.checkpoint()); logRestoreOffsets(restorer.partition(), restorer.checkpoint(), endOffsets.get(restorer.partition())); restorer.setStartingOffset(consumer.position(restorer.partition())); } else { consumer.seekToBeginning(Collections.singletonList(restorer.partition())); needsPositionUpdate.add(restorer); } } for (final StateRestorer restorer : needsPositionUpdate) { final long position = consumer.position(restorer.partition()); restorer.setStartingOffset(position); logRestoreOffsets(restorer.partition(), position, endOffsets.get(restorer.partition())); } final Set<TopicPartition> partitions = new HashSet<>(needsRestoring.keySet()); while (!partitions.isEmpty()) { final ConsumerRecords<byte[], byte[]> allRecords = consumer.poll(10); final 
Iterator<TopicPartition> partitionIterator = partitions.iterator(); while (partitionIterator.hasNext()) { restorePartition(endOffsets, allRecords, partitionIterator); } } } finally { consumer.assign(Collections.<TopicPartition>emptyList()); log.debug("{} Took {} ms to restore all active states", logPrefix, time.milliseconds() - start); } } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); }
|
StoreChangelogReader implements ChangelogReader { public void restore() { final long start = time.milliseconds(); try { if (!consumer.subscription().isEmpty()) { throw new IllegalStateException(String.format("Restore consumer should have not subscribed to any partitions (%s) beforehand", consumer.subscription())); } final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(stateRestorers.keySet()); final Map<TopicPartition, StateRestorer> needsRestoring = new HashMap<>(); for (final Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); Long offset = entry.getValue(); final StateRestorer restorer = stateRestorers.get(topicPartition); if (restorer.checkpoint() >= offset) { restorer.setRestoredOffset(restorer.checkpoint()); } else { needsRestoring.put(topicPartition, restorer); } } log.info("{} Starting restoring state stores from changelog topics {}", logPrefix, needsRestoring.keySet()); consumer.assign(needsRestoring.keySet()); final List<StateRestorer> needsPositionUpdate = new ArrayList<>(); for (final StateRestorer restorer : needsRestoring.values()) { if (restorer.checkpoint() != StateRestorer.NO_CHECKPOINT) { consumer.seek(restorer.partition(), restorer.checkpoint()); logRestoreOffsets(restorer.partition(), restorer.checkpoint(), endOffsets.get(restorer.partition())); restorer.setStartingOffset(consumer.position(restorer.partition())); } else { consumer.seekToBeginning(Collections.singletonList(restorer.partition())); needsPositionUpdate.add(restorer); } } for (final StateRestorer restorer : needsPositionUpdate) { final long position = consumer.position(restorer.partition()); restorer.setStartingOffset(position); logRestoreOffsets(restorer.partition(), position, endOffsets.get(restorer.partition())); } final Set<TopicPartition> partitions = new HashSet<>(needsRestoring.keySet()); while (!partitions.isEmpty()) { final ConsumerRecords<byte[], byte[]> allRecords = consumer.poll(10); final 
Iterator<TopicPartition> partitionIterator = partitions.iterator(); while (partitionIterator.hasNext()) { restorePartition(endOffsets, allRecords, partitionIterator); } } } finally { consumer.assign(Collections.<TopicPartition>emptyList()); log.debug("{} Took {} ms to restore all active states", logPrefix, time.milliseconds() - start); } } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
StoreChangelogReader implements ChangelogReader { public void restore() { final long start = time.milliseconds(); try { if (!consumer.subscription().isEmpty()) { throw new IllegalStateException(String.format("Restore consumer should have not subscribed to any partitions (%s) beforehand", consumer.subscription())); } final Map<TopicPartition, Long> endOffsets = consumer.endOffsets(stateRestorers.keySet()); final Map<TopicPartition, StateRestorer> needsRestoring = new HashMap<>(); for (final Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) { TopicPartition topicPartition = entry.getKey(); Long offset = entry.getValue(); final StateRestorer restorer = stateRestorers.get(topicPartition); if (restorer.checkpoint() >= offset) { restorer.setRestoredOffset(restorer.checkpoint()); } else { needsRestoring.put(topicPartition, restorer); } } log.info("{} Starting restoring state stores from changelog topics {}", logPrefix, needsRestoring.keySet()); consumer.assign(needsRestoring.keySet()); final List<StateRestorer> needsPositionUpdate = new ArrayList<>(); for (final StateRestorer restorer : needsRestoring.values()) { if (restorer.checkpoint() != StateRestorer.NO_CHECKPOINT) { consumer.seek(restorer.partition(), restorer.checkpoint()); logRestoreOffsets(restorer.partition(), restorer.checkpoint(), endOffsets.get(restorer.partition())); restorer.setStartingOffset(consumer.position(restorer.partition())); } else { consumer.seekToBeginning(Collections.singletonList(restorer.partition())); needsPositionUpdate.add(restorer); } } for (final StateRestorer restorer : needsPositionUpdate) { final long position = consumer.position(restorer.partition()); restorer.setStartingOffset(position); logRestoreOffsets(restorer.partition(), position, endOffsets.get(restorer.partition())); } final Set<TopicPartition> partitions = new HashSet<>(needsRestoring.keySet()); while (!partitions.isEmpty()) { final ConsumerRecords<byte[], byte[]> allRecords = consumer.poll(10); final 
Iterator<TopicPartition> partitionIterator = partitions.iterator(); while (partitionIterator.hasNext()) { restorePartition(endOffsets, allRecords, partitionIterator); } } } finally { consumer.assign(Collections.<TopicPartition>emptyList()); log.debug("{} Took {} ms to restore all active states", logPrefix, time.milliseconds() - start); } } StoreChangelogReader(final String threadId, final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); StoreChangelogReader(final Consumer<byte[], byte[]> consumer, final Time time, final long partitionValidationTimeoutMs); @Override void validatePartitionExists(final TopicPartition topicPartition, final String storeName); @Override void register(final StateRestorer restorer); void restore(); @Override Map<TopicPartition, Long> restoredOffsets(); }
|
@Test public void shouldReturnCorrectPartitionCounts() throws Exception { InternalTopicManager internalTopicManager = new InternalTopicManager(streamsKafkaClient, 1, WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); Assert.assertEquals(Collections.singletonMap(topic, 1), internalTopicManager.getNumPartitions(Collections.singleton(topic))); }
|
public Map<String, Integer> getNumPartitions(final Set<String> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); existingTopicPartitions.keySet().retainAll(topics); return existingTopicPartitions; } catch (StreamsException ex) { log.warn("Could not get number of partitions: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not get number of partitions."); }
|
InternalTopicManager { public Map<String, Integer> getNumPartitions(final Set<String> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); existingTopicPartitions.keySet().retainAll(topics); return existingTopicPartitions; } catch (StreamsException ex) { log.warn("Could not get number of partitions: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not get number of partitions."); } }
|
InternalTopicManager { public Map<String, Integer> getNumPartitions(final Set<String> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); existingTopicPartitions.keySet().retainAll(topics); return existingTopicPartitions; } catch (StreamsException ex) { log.warn("Could not get number of partitions: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not get number of partitions."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); }
|
InternalTopicManager { public Map<String, Integer> getNumPartitions(final Set<String> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); existingTopicPartitions.keySet().retainAll(topics); return existingTopicPartitions; } catch (StreamsException ex) { log.warn("Could not get number of partitions: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not get number of partitions."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); void makeReady(final Map<InternalTopicConfig, Integer> topics); Map<String, Integer> getNumPartitions(final Set<String> topics); void close(); }
|
InternalTopicManager { public Map<String, Integer> getNumPartitions(final Set<String> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); existingTopicPartitions.keySet().retainAll(topics); return existingTopicPartitions; } catch (StreamsException ex) { log.warn("Could not get number of partitions: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not get number of partitions."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); void makeReady(final Map<InternalTopicConfig, Integer> topics); Map<String, Integer> getNumPartitions(final Set<String> topics); void close(); static final String CLEANUP_POLICY_PROP; static final String RETENTION_MS; static final Long WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT; }
|
@Test public void shouldCreateRequiredTopics() throws Exception { InternalTopicManager internalTopicManager = new InternalTopicManager(streamsKafkaClient, 1, WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); internalTopicManager.makeReady(Collections.singletonMap(new InternalTopicConfig(topic, Collections.singleton(InternalTopicConfig.CleanupPolicy.compact), null), 1)); }
|
public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); }
|
InternalTopicManager { public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); } }
|
InternalTopicManager { public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); }
|
InternalTopicManager { public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); void makeReady(final Map<InternalTopicConfig, Integer> topics); Map<String, Integer> getNumPartitions(final Set<String> topics); void close(); }
|
InternalTopicManager { public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); void makeReady(final Map<InternalTopicConfig, Integer> topics); Map<String, Integer> getNumPartitions(final Set<String> topics); void close(); static final String CLEANUP_POLICY_PROP; static final String RETENTION_MS; static final Long WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT; }
|
@Test public void shouldNotCreateTopicIfExistsWithDifferentPartitions() throws Exception { InternalTopicManager internalTopicManager = new InternalTopicManager(streamsKafkaClient, 1, WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT, time); boolean exceptionWasThrown = false; try { internalTopicManager.makeReady(Collections.singletonMap(new InternalTopicConfig(topic, Collections.singleton(InternalTopicConfig.CleanupPolicy.compact), null), 2)); } catch (StreamsException e) { exceptionWasThrown = true; } Assert.assertTrue(exceptionWasThrown); }
|
public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); }
|
InternalTopicManager { public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); } }
|
InternalTopicManager { public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); }
|
InternalTopicManager { public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); void makeReady(final Map<InternalTopicConfig, Integer> topics); Map<String, Integer> getNumPartitions(final Set<String> topics); void close(); }
|
InternalTopicManager { public void makeReady(final Map<InternalTopicConfig, Integer> topics) { for (int i = 0; i < MAX_TOPIC_READY_TRY; i++) { try { final MetadataResponse metadata = streamsKafkaClient.fetchMetadata(); final Map<String, Integer> existingTopicPartitions = fetchExistingPartitionCountByTopic(metadata); final Map<InternalTopicConfig, Integer> topicsToBeCreated = validateTopicPartitions(topics, existingTopicPartitions); if (metadata.brokers().size() < replicationFactor) { throw new StreamsException("Found only " + metadata.brokers().size() + " brokers, " + " but replication factor is " + replicationFactor + "." + " Decrease replication factor for internal topics via StreamsConfig parameter \"replication.factor\"" + " or add more brokers to your cluster."); } streamsKafkaClient.createTopics(topicsToBeCreated, replicationFactor, windowChangeLogAdditionalRetention, metadata); return; } catch (StreamsException ex) { log.warn("Could not create internal topics: " + ex.getMessage() + " Retry #" + i); } time.sleep(100L); } throw new StreamsException("Could not create internal topics."); } InternalTopicManager(final StreamsKafkaClient streamsKafkaClient, final int replicationFactor,
final long windowChangeLogAdditionalRetention, final Time time); void makeReady(final Map<InternalTopicConfig, Integer> topics); Map<String, Integer> getNumPartitions(final Set<String> topics); void close(); static final String CLEANUP_POLICY_PROP; static final String RETENTION_MS; static final Long WINDOW_CHANGE_LOG_ADDITIONAL_RETENTION_DEFAULT; }
|
@Test(expected = ProcessorStateException.class) public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenAuthorizationException() throws Exception { final Consumer consumer = mockConsumer(new AuthorizationException("blah")); final AbstractTask task = createTask(consumer); task.updateOffsetLimits(); }
|
/**
 * Refreshes the per-partition offset limit in the state manager from the consumer's
 * committed offsets. A partition with no committed offset yet is limited to 0.
 *
 * @throws ProcessorStateException if fetching a committed offset fails with an
 *         AuthorizationException or any other KafkaException (wrapped with task context)
 */
protected void updateOffsetLimits() {
    // FIX: the original format string was "{} Updating store offset limits {}" with only one
    // argument; SLF4J would render the trailing "{}" literally. Dropped the stray placeholder.
    log.debug("{} Updating store offset limits", logPrefix);
    for (final TopicPartition partition : partitions) {
        try {
            final OffsetAndMetadata metadata = consumer.committed(partition);
            // committed() returns null when nothing has been committed for the partition
            stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
        } catch (final AuthorizationException e) {
            throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
        } catch (final WakeupException e) {
            // WakeupException is a control-flow signal from the consumer; propagate untouched
            throw e;
        } catch (final KafkaException e) {
            throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
        }
    }
}
|
AbstractTask {
    /**
     * Refreshes the per-partition offset limit in the state manager from the consumer's
     * committed offsets; partitions without a committed offset are limited to 0.
     */
    protected void updateOffsetLimits() {
        // FIX: dropped the stray trailing "{}" placeholder that had no matching argument
        log.debug("{} Updating store offset limits", logPrefix);
        for (final TopicPartition partition : partitions) {
            try {
                final OffsetAndMetadata metadata = consumer.committed(partition);
                stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
            } catch (final AuthorizationException e) {
                throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
            } catch (final WakeupException e) {
                // consumer control-flow signal; propagate untouched
                throw e;
            } catch (final KafkaException e) {
                throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
            }
        }
    }
}
|
// NOTE(review): auto-extracted dump of AbstractTask (updateOffsetLimits body plus the constructor
// signature only); not compilable as written. NOTE(review): the log.debug format string has a
// trailing "{}" with no matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); }
|
// NOTE(review): auto-extracted dump of AbstractTask (updateOffsetLimits body plus constructor and
// abstract/accessor signatures); not compilable as written. NOTE(review): the log.debug format
// string has a trailing "{}" with no matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); abstract void resume(); abstract void commit(); abstract void suspend(); abstract void close(final boolean clean); final TaskId id(); final String applicationId(); final Set<TopicPartition> partitions(); final ProcessorTopology topology(); final ProcessorContext context(); final ThreadCache cache(); StateStore getStore(final String name); @Override String toString(); String toString(final String indent); }
|
// NOTE(review): duplicate auto-extracted dump of AbstractTask (same content as the preceding dump);
// not compilable as written. NOTE(review): the log.debug format string has a trailing "{}" with no
// matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); abstract void resume(); abstract void commit(); abstract void suspend(); abstract void close(final boolean clean); final TaskId id(); final String applicationId(); final Set<TopicPartition> partitions(); final ProcessorTopology topology(); final ProcessorContext context(); final ThreadCache cache(); StateStore getStore(final String name); @Override String toString(); String toString(final String indent); }
|
// Verifies that a generic KafkaException from the consumer is rewrapped by
// updateOffsetLimits() as a ProcessorStateException.
@Test(expected = ProcessorStateException.class)
public void shouldThrowProcessorStateExceptionOnInitializeOffsetsWhenKafkaException() throws Exception {
    final Consumer failingConsumer = mockConsumer(new KafkaException("blah"));
    createTask(failingConsumer).updateOffsetLimits();
}
|
/**
 * Refreshes the per-partition offset limit in the state manager from the consumer's
 * committed offsets. A partition with no committed offset yet is limited to 0.
 *
 * @throws ProcessorStateException if fetching a committed offset fails with an
 *         AuthorizationException or any other KafkaException (wrapped with task context)
 */
protected void updateOffsetLimits() {
    // FIX: the original format string was "{} Updating store offset limits {}" with only one
    // argument; SLF4J would render the trailing "{}" literally. Dropped the stray placeholder.
    log.debug("{} Updating store offset limits", logPrefix);
    for (final TopicPartition partition : partitions) {
        try {
            final OffsetAndMetadata metadata = consumer.committed(partition);
            // committed() returns null when nothing has been committed for the partition
            stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
        } catch (final AuthorizationException e) {
            throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
        } catch (final WakeupException e) {
            // WakeupException is a control-flow signal from the consumer; propagate untouched
            throw e;
        } catch (final KafkaException e) {
            throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
        }
    }
}
|
AbstractTask {
    /**
     * Refreshes the per-partition offset limit in the state manager from the consumer's
     * committed offsets; partitions without a committed offset are limited to 0.
     */
    protected void updateOffsetLimits() {
        // FIX: dropped the stray trailing "{}" placeholder that had no matching argument
        log.debug("{} Updating store offset limits", logPrefix);
        for (final TopicPartition partition : partitions) {
            try {
                final OffsetAndMetadata metadata = consumer.committed(partition);
                stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
            } catch (final AuthorizationException e) {
                throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
            } catch (final WakeupException e) {
                // consumer control-flow signal; propagate untouched
                throw e;
            } catch (final KafkaException e) {
                throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
            }
        }
    }
}
|
// NOTE(review): auto-extracted dump of AbstractTask (updateOffsetLimits body plus the constructor
// signature only); not compilable as written. NOTE(review): the log.debug format string has a
// trailing "{}" with no matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); }
|
// NOTE(review): auto-extracted dump of AbstractTask (updateOffsetLimits body plus constructor and
// abstract/accessor signatures); not compilable as written. NOTE(review): the log.debug format
// string has a trailing "{}" with no matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); abstract void resume(); abstract void commit(); abstract void suspend(); abstract void close(final boolean clean); final TaskId id(); final String applicationId(); final Set<TopicPartition> partitions(); final ProcessorTopology topology(); final ProcessorContext context(); final ThreadCache cache(); StateStore getStore(final String name); @Override String toString(); String toString(final String indent); }
|
// NOTE(review): duplicate auto-extracted dump of AbstractTask (same content as the preceding dump);
// not compilable as written. NOTE(review): the log.debug format string has a trailing "{}" with no
// matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); abstract void resume(); abstract void commit(); abstract void suspend(); abstract void close(final boolean clean); final TaskId id(); final String applicationId(); final Set<TopicPartition> partitions(); final ProcessorTopology topology(); final ProcessorContext context(); final ThreadCache cache(); StateStore getStore(final String name); @Override String toString(); String toString(final String indent); }
|
// Verifies that a WakeupException from the consumer is propagated unchanged
// (not rewrapped) by updateOffsetLimits().
@Test(expected = WakeupException.class)
public void shouldThrowWakeupExceptionOnInitializeOffsetsWhenWakeupException() throws Exception {
    final Consumer wakingConsumer = mockConsumer(new WakeupException());
    createTask(wakingConsumer).updateOffsetLimits();
}
|
/**
 * Refreshes the per-partition offset limit in the state manager from the consumer's
 * committed offsets. A partition with no committed offset yet is limited to 0.
 *
 * @throws ProcessorStateException if fetching a committed offset fails with an
 *         AuthorizationException or any other KafkaException (wrapped with task context)
 */
protected void updateOffsetLimits() {
    // FIX: the original format string was "{} Updating store offset limits {}" with only one
    // argument; SLF4J would render the trailing "{}" literally. Dropped the stray placeholder.
    log.debug("{} Updating store offset limits", logPrefix);
    for (final TopicPartition partition : partitions) {
        try {
            final OffsetAndMetadata metadata = consumer.committed(partition);
            // committed() returns null when nothing has been committed for the partition
            stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
        } catch (final AuthorizationException e) {
            throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
        } catch (final WakeupException e) {
            // WakeupException is a control-flow signal from the consumer; propagate untouched
            throw e;
        } catch (final KafkaException e) {
            throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
        }
    }
}
|
AbstractTask {
    /**
     * Refreshes the per-partition offset limit in the state manager from the consumer's
     * committed offsets; partitions without a committed offset are limited to 0.
     */
    protected void updateOffsetLimits() {
        // FIX: dropped the stray trailing "{}" placeholder that had no matching argument
        log.debug("{} Updating store offset limits", logPrefix);
        for (final TopicPartition partition : partitions) {
            try {
                final OffsetAndMetadata metadata = consumer.committed(partition);
                stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L);
            } catch (final AuthorizationException e) {
                throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e);
            } catch (final WakeupException e) {
                // consumer control-flow signal; propagate untouched
                throw e;
            } catch (final KafkaException e) {
                throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e);
            }
        }
    }
}
|
// NOTE(review): auto-extracted dump of AbstractTask (updateOffsetLimits body plus the constructor
// signature only); not compilable as written. NOTE(review): the log.debug format string has a
// trailing "{}" with no matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); }
|
// NOTE(review): auto-extracted dump of AbstractTask (updateOffsetLimits body plus constructor and
// abstract/accessor signatures); not compilable as written. NOTE(review): the log.debug format
// string has a trailing "{}" with no matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); abstract void resume(); abstract void commit(); abstract void suspend(); abstract void close(final boolean clean); final TaskId id(); final String applicationId(); final Set<TopicPartition> partitions(); final ProcessorTopology topology(); final ProcessorContext context(); final ThreadCache cache(); StateStore getStore(final String name); @Override String toString(); String toString(final String indent); }
|
// NOTE(review): duplicate auto-extracted dump of AbstractTask (same content as the preceding dump);
// not compilable as written. NOTE(review): the log.debug format string has a trailing "{}" with no
// matching argument — SLF4J prints it literally; confirm and drop it.
AbstractTask { protected void updateOffsetLimits() { log.debug("{} Updating store offset limits {}", logPrefix); for (final TopicPartition partition : partitions) { try { final OffsetAndMetadata metadata = consumer.committed(partition); stateMgr.putOffsetLimit(partition, metadata != null ? metadata.offset() : 0L); } catch (final AuthorizationException e) { throw new ProcessorStateException(String.format("task [%s] AuthorizationException when initializing offsets for %s", id, partition), e); } catch (final WakeupException e) { throw e; } catch (final KafkaException e) { throw new ProcessorStateException(String.format("task [%s] Failed to initialize offsets for %s", id, partition), e); } } } AbstractTask(final TaskId id,
final String applicationId,
final Collection<TopicPartition> partitions,
final ProcessorTopology topology,
final Consumer<byte[], byte[]> consumer,
final ChangelogReader changelogReader,
final boolean isStandby,
final StateDirectory stateDirectory,
final ThreadCache cache,
final StreamsConfig config); abstract void resume(); abstract void commit(); abstract void suspend(); abstract void close(final boolean clean); final TaskId id(); final String applicationId(); final Set<TopicPartition> partitions(); final ProcessorTopology topology(); final ProcessorContext context(); final ThreadCache cache(); StateStore getStore(final String name); @Override String toString(); String toString(final String indent); }
|
// Verifies that adding an offset-reset source with an empty topic list is rejected
// with a TopologyBuilderException.
@Test
public void shouldNotAllowOffsetResetSourceWithoutTopics() {
    final Serde<String> stringSerde = Serdes.String();
    final TopologyBuilder topology = new TopologyBuilder();
    try {
        topology.addSource(TopologyBuilder.AutoOffsetReset.EARLIEST, "source", null,
                stringSerde.deserializer(), stringSerde.deserializer(), new String[]{});
        fail("Should throw TopologyBuilderException with no topics");
    } catch (TopologyBuilderException expected) {
        // expected: a source node must name at least one topic
    }
}
|
/**
 * Adds a source node named {@code name} consuming the given topics, delegating to the full
 * overload with {@code null} for offset reset, timestamp extractor, and key/value deserializers
 * (presumably falling back to configured defaults — confirm in the full overload).
 *
 * @return this builder, for chaining
 */
public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); }
|
// NOTE(review): auto-extracted dump of TopologyBuilder showing only the two-arg addSource
// convenience overload, which delegates to the full overload with nulls; not compilable as written.
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } }
|
// NOTE(review): auto-extracted dump of TopologyBuilder (two-arg addSource overload plus the
// no-arg constructor signature); not compilable as written.
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); }
|
// NOTE(review): auto-extracted interface dump of TopologyBuilder — one method body (the two-arg
// addSource convenience overload, delegating with nulls) followed by bodiless signatures for the
// remaining public API; not compilable as written. Signature wrapping mid-parameter-list is an
// artifact of extraction.
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
// NOTE(review): duplicate auto-extracted interface dump of TopologyBuilder (same content as the
// preceding dump); not compilable as written. Signature wrapping mid-parameter-list is an
// extraction artifact.
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
// Verifies that registering two sources under the same node name is rejected
// with a TopologyBuilderException on the second registration.
@Test
public void shouldNotAllowOffsetResetSourceWithDuplicateSourceName() {
    final Serde<String> stringSerde = Serdes.String();
    final TopologyBuilder topology = new TopologyBuilder();
    topology.addSource(TopologyBuilder.AutoOffsetReset.EARLIEST, "source", null,
            stringSerde.deserializer(), stringSerde.deserializer(), "topic-1");
    try {
        topology.addSource(TopologyBuilder.AutoOffsetReset.LATEST, "source", null,
                stringSerde.deserializer(), stringSerde.deserializer(), "topic-2");
        fail("Should throw TopologyBuilderException for duplicate source name");
    } catch (TopologyBuilderException expected) {
        // expected: node names must be unique within a topology
    }
}
|
/**
 * Adds a source node named {@code name} consuming the given topics, delegating to the full
 * overload with {@code null} for offset reset, timestamp extractor, and key/value deserializers
 * (presumably falling back to configured defaults — confirm in the full overload).
 *
 * @return this builder, for chaining
 */
public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); }
|
// NOTE(review): auto-extracted dump of TopologyBuilder showing only the two-arg addSource
// convenience overload, which delegates to the full overload with nulls; not compilable as written.
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } }
|
// NOTE(review): auto-extracted dump of TopologyBuilder (two-arg addSource overload plus the
// no-arg constructor signature); not compilable as written.
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); }
|
// NOTE(review): another duplicate auto-extracted interface dump of TopologyBuilder; not compilable
// as written. Signature wrapping mid-parameter-list is an extraction artifact.
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = BadRequestException.class) public void testPutConnectorConfigNameMismatch() throws Throwable { Map<String, String> connConfig = new HashMap<>(CONNECTOR_CONFIG); connConfig.put(ConnectorConfig.NAME_CONFIG, "mismatched-name"); connectorsResource.putConnectorConfig(CONNECTOR_NAME, FORWARD, connConfig); }
|
@PUT @Path("/{connector}/config") public Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG); if (includedName != null) { if (!includedName.equals(connector)) throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")"); } else { connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector); } herder.putConnectorConfig(connector, connectorConfig, true, cb); Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "PUT", connectorConfig, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); Response.ResponseBuilder response; if (createdInfo.created()) response = Response.created(URI.create("/connectors/" + connector)); else response = Response.ok(); return response.entity(createdInfo.result()).build(); }
|
ConnectorsResource { @PUT @Path("/{connector}/config") public Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG); if (includedName != null) { if (!includedName.equals(connector)) throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")"); } else { connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector); } herder.putConnectorConfig(connector, connectorConfig, true, cb); Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "PUT", connectorConfig, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); Response.ResponseBuilder response; if (createdInfo.created()) response = Response.created(URI.create("/connectors/" + connector)); else response = Response.ok(); return response.entity(createdInfo.result()).build(); } }
|
ConnectorsResource { @PUT @Path("/{connector}/config") public Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG); if (includedName != null) { if (!includedName.equals(connector)) throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")"); } else { connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector); } herder.putConnectorConfig(connector, connectorConfig, true, cb); Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "PUT", connectorConfig, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); Response.ResponseBuilder response; if (createdInfo.created()) response = Response.created(URI.create("/connectors/" + connector)); else response = Response.ok(); return response.entity(createdInfo.result()).build(); } ConnectorsResource(Herder herder); }
|
ConnectorsResource { @PUT @Path("/{connector}/config") public Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG); if (includedName != null) { if (!includedName.equals(connector)) throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")"); } else { connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector); } herder.putConnectorConfig(connector, connectorConfig, true, cb); Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "PUT", connectorConfig, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); Response.ResponseBuilder response; if (createdInfo.created()) response = Response.created(URI.create("/connectors/" + connector)); else response = Response.ok(); return response.entity(createdInfo.result()).build(); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }
|
ConnectorsResource { @PUT @Path("/{connector}/config") public Response putConnectorConfig(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward, final Map<String, String> connectorConfig) throws Throwable { FutureCallback<Herder.Created<ConnectorInfo>> cb = new FutureCallback<>(); String includedName = connectorConfig.get(ConnectorConfig.NAME_CONFIG); if (includedName != null) { if (!includedName.equals(connector)) throw new BadRequestException("Connector name configuration (" + includedName + ") doesn't match connector name in the URL (" + connector + ")"); } else { connectorConfig.put(ConnectorConfig.NAME_CONFIG, connector); } herder.putConnectorConfig(connector, connectorConfig, true, cb); Herder.Created<ConnectorInfo> createdInfo = completeOrForwardRequest(cb, "/connectors/" + connector + "/config", "PUT", connectorConfig, new TypeReference<ConnectorInfo>() { }, new CreatedConnectorInfoTranslator(), forward); Response.ResponseBuilder response; if (createdInfo.created()) response = Response.created(URI.create("/connectors/" + connector)); else response = Response.ok(); return response.entity(createdInfo.result()).build(); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }
|
@Test(expected = TopologyBuilderException.class) public void testAddSourceWithSameName() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source", "topic-1"); builder.addSource("source", "topic-2"); }
|
public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class) public void testAddSourceWithSameTopic() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source", "topic-1"); builder.addSource("source-2", "topic-1"); }
|
public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class) public void testAddProcessorWithWrongParent() { final TopologyBuilder builder = new TopologyBuilder(); builder.addProcessor("processor", new MockProcessorSupplier(), "source"); }
|
public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class) public void testAddProcessorWithSelfParent() { final TopologyBuilder builder = new TopologyBuilder(); builder.addProcessor("processor", new MockProcessorSupplier(), "processor"); }
|
public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class) public void testAddSinkWithWrongParent() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSink("sink", "topic-2", "source"); }
|
public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class) public void testAddSinkWithSelfParent() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSink("sink", "topic-2", "sink"); }
|
public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class) public void testPatternMatchesAlreadyProvidedTopicSource() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source-1", "foo"); builder.addSource("source-2", Pattern.compile("f.*")); }
|
public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class)
public void testNamedTopicMatchesAlreadyProvidedPattern() {
    // Registering a concrete topic name that matches an earlier source's
    // pattern subscription must be rejected by the builder.
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.addSource("source-1", Pattern.compile("f.*"));
    topologyBuilder.addSource("source-2", "foo");
}
|
/**
 * Adds a source node that consumes the given topics. All optional settings
 * (offset-reset policy, timestamp extractor, key/value deserializers) are
 * passed as {@code null} to the full overload, so the topology's defaults
 * apply.
 *
 * @param name   unique name of the source node
 * @param topics one or more topic names this source subscribes to
 * @return this builder, for method chaining
 */
public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class)
public void testAddStateStoreWithNonExistingProcessor() {
    // Wiring a fresh state store to a processor name that was never
    // registered must fail. (The misspelled name below is intentional —
    // it is exactly the non-existent processor being looked up.)
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.addStateStore(new MockStateStoreSupplier("store", false), "no-such-processsor");
}
|
/**
 * Registers a state store with the topology and optionally connects it to
 * the named processors.
 *
 * @param supplier       factory that creates the store; must not be null
 * @param processorNames processors the store is attached to; may be null or empty
 * @return this builder, for method chaining
 * @throws TopologyBuilderException if a store with the same name was already added
 */
public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) {
    Objects.requireNonNull(supplier, "supplier can't be null");
    // Store names must be unique across the whole topology.
    if (stateFactories.containsKey(supplier.name())) {
        throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added.");
    }
    stateFactories.put(supplier.name(), new StateStoreFactory(supplier));
    // Guard clause: nothing to wire up when no processor names were given.
    if (processorNames == null) {
        return this;
    }
    for (final String processor : processorNames) {
        connectProcessorAndStateStore(processor, supplier.name());
    }
    return this;
}
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = TopologyBuilderException.class)
public void testAddStateStoreWithDuplicates() {
    // Adding two state stores under the same name must be rejected on the
    // second call.
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.addStateStore(new MockStateStoreSupplier("store", false));
    topologyBuilder.addStateStore(new MockStateStoreSupplier("store", false));
}
|
/**
 * Registers {@code supplier}'s state store with the topology. Rejects
 * duplicate store names with a {@link TopologyBuilderException}; otherwise
 * records a {@code StateStoreFactory} for the store and, when
 * {@code processorNames} is non-null, connects the store to each named
 * processor.
 *
 * @param supplier       factory that creates the store; must not be null
 * @param processorNames processors the store is attached to; may be null or empty
 * @return this builder, for method chaining
 */
public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test public void testGetConnectorTaskConfigs() throws Throwable { final Capture<Callback<List<TaskInfo>>> cb = Capture.newInstance(); herder.taskConfigs(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackResult(cb, TASK_INFOS); PowerMock.replayAll(); List<TaskInfo> taskInfos = connectorsResource.getTaskConfigs(CONNECTOR_NAME, FORWARD); assertEquals(TASK_INFOS, taskInfos); PowerMock.verifyAll(); }
|
@GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); }
|
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } }
|
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } ConnectorsResource(Herder herder); }
|
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }
|
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }
|
@Test public void testTopicGroups() { final TopologyBuilder builder = new TopologyBuilder(); builder.setApplicationId("X"); builder.addInternalTopic("topic-1x"); builder.addSource("source-1", "topic-1", "topic-1x"); builder.addSource("source-2", "topic-2"); builder.addSource("source-3", "topic-3"); builder.addSource("source-4", "topic-4"); builder.addSource("source-5", "topic-5"); builder.addProcessor("processor-1", new MockProcessorSupplier(), "source-1"); builder.addProcessor("processor-2", new MockProcessorSupplier(), "source-2", "processor-1"); builder.copartitionSources(mkList("source-1", "source-2")); builder.addProcessor("processor-3", new MockProcessorSupplier(), "source-3", "source-4"); Map<Integer, TopicsInfo> topicGroups = builder.topicGroups(); Map<Integer, TopicsInfo> expectedTopicGroups = new HashMap<>(); expectedTopicGroups.put(0, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-1", "X-topic-1x", "topic-2"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap())); expectedTopicGroups.put(1, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-3", "topic-4"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap())); expectedTopicGroups.put(2, new TopicsInfo(Collections.<String>emptySet(), mkSet("topic-5"), Collections.<String, InternalTopicConfig>emptyMap(), Collections.<String, InternalTopicConfig>emptyMap())); assertEquals(3, topicGroups.size()); assertEquals(expectedTopicGroups, topicGroups); Collection<Set<String>> copartitionGroups = builder.copartitionGroups(); assertEquals(mkSet(mkSet("topic-1", "X-topic-1x", "topic-2")), new HashSet<>(copartitionGroups)); }
|
public synchronized Map<Integer, TopicsInfo> topicGroups() { final Map<Integer, TopicsInfo> topicGroups = new LinkedHashMap<>(); if (nodeGroups == null) nodeGroups = makeNodeGroups(); for (Map.Entry<Integer, Set<String>> entry : nodeGroups.entrySet()) { final Set<String> sinkTopics = new HashSet<>(); final Set<String> sourceTopics = new HashSet<>(); final Map<String, InternalTopicConfig> internalSourceTopics = new HashMap<>(); final Map<String, InternalTopicConfig> stateChangelogTopics = new HashMap<>(); for (String node : entry.getValue()) { final List<String> topics = nodeToSourceTopics.get(node); if (topics != null) { for (String topic : topics) { if (globalTopics.contains(topic)) { continue; } if (this.internalTopicNames.contains(topic)) { final String internalTopic = decorateTopic(topic); internalSourceTopics.put(internalTopic, new InternalTopicConfig(internalTopic, Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap())); sourceTopics.add(internalTopic); } else { sourceTopics.add(topic); } } } final String topic = nodeToSinkTopic.get(node); if (topic != null) { if (internalTopicNames.contains(topic)) { sinkTopics.add(decorateTopic(topic)); } else { sinkTopics.add(topic); } } for (StateStoreFactory stateFactory : stateFactories.values()) { final StateStoreSupplier supplier = stateFactory.supplier; if (supplier.loggingEnabled() && stateFactory.users.contains(node)) { final String name = ProcessorStateManager.storeChangelogTopic(applicationId, supplier.name()); final InternalTopicConfig internalTopicConfig = createInternalTopicConfig(supplier, name); stateChangelogTopics.put(name, internalTopicConfig); } } } if (!sourceTopics.isEmpty()) { topicGroups.put(entry.getKey(), new TopicsInfo( Collections.unmodifiableSet(sinkTopics), Collections.unmodifiableSet(sourceTopics), Collections.unmodifiableMap(internalSourceTopics), Collections.unmodifiableMap(stateChangelogTopics))); } } return 
Collections.unmodifiableMap(topicGroups); }
|
TopologyBuilder { public synchronized Map<Integer, TopicsInfo> topicGroups() { final Map<Integer, TopicsInfo> topicGroups = new LinkedHashMap<>(); if (nodeGroups == null) nodeGroups = makeNodeGroups(); for (Map.Entry<Integer, Set<String>> entry : nodeGroups.entrySet()) { final Set<String> sinkTopics = new HashSet<>(); final Set<String> sourceTopics = new HashSet<>(); final Map<String, InternalTopicConfig> internalSourceTopics = new HashMap<>(); final Map<String, InternalTopicConfig> stateChangelogTopics = new HashMap<>(); for (String node : entry.getValue()) { final List<String> topics = nodeToSourceTopics.get(node); if (topics != null) { for (String topic : topics) { if (globalTopics.contains(topic)) { continue; } if (this.internalTopicNames.contains(topic)) { final String internalTopic = decorateTopic(topic); internalSourceTopics.put(internalTopic, new InternalTopicConfig(internalTopic, Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap())); sourceTopics.add(internalTopic); } else { sourceTopics.add(topic); } } } final String topic = nodeToSinkTopic.get(node); if (topic != null) { if (internalTopicNames.contains(topic)) { sinkTopics.add(decorateTopic(topic)); } else { sinkTopics.add(topic); } } for (StateStoreFactory stateFactory : stateFactories.values()) { final StateStoreSupplier supplier = stateFactory.supplier; if (supplier.loggingEnabled() && stateFactory.users.contains(node)) { final String name = ProcessorStateManager.storeChangelogTopic(applicationId, supplier.name()); final InternalTopicConfig internalTopicConfig = createInternalTopicConfig(supplier, name); stateChangelogTopics.put(name, internalTopicConfig); } } } if (!sourceTopics.isEmpty()) { topicGroups.put(entry.getKey(), new TopicsInfo( Collections.unmodifiableSet(sinkTopics), Collections.unmodifiableSet(sourceTopics), Collections.unmodifiableMap(internalSourceTopics), Collections.unmodifiableMap(stateChangelogTopics))); } } return 
Collections.unmodifiableMap(topicGroups); } }
|
TopologyBuilder { public synchronized Map<Integer, TopicsInfo> topicGroups() { final Map<Integer, TopicsInfo> topicGroups = new LinkedHashMap<>(); if (nodeGroups == null) nodeGroups = makeNodeGroups(); for (Map.Entry<Integer, Set<String>> entry : nodeGroups.entrySet()) { final Set<String> sinkTopics = new HashSet<>(); final Set<String> sourceTopics = new HashSet<>(); final Map<String, InternalTopicConfig> internalSourceTopics = new HashMap<>(); final Map<String, InternalTopicConfig> stateChangelogTopics = new HashMap<>(); for (String node : entry.getValue()) { final List<String> topics = nodeToSourceTopics.get(node); if (topics != null) { for (String topic : topics) { if (globalTopics.contains(topic)) { continue; } if (this.internalTopicNames.contains(topic)) { final String internalTopic = decorateTopic(topic); internalSourceTopics.put(internalTopic, new InternalTopicConfig(internalTopic, Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap())); sourceTopics.add(internalTopic); } else { sourceTopics.add(topic); } } } final String topic = nodeToSinkTopic.get(node); if (topic != null) { if (internalTopicNames.contains(topic)) { sinkTopics.add(decorateTopic(topic)); } else { sinkTopics.add(topic); } } for (StateStoreFactory stateFactory : stateFactories.values()) { final StateStoreSupplier supplier = stateFactory.supplier; if (supplier.loggingEnabled() && stateFactory.users.contains(node)) { final String name = ProcessorStateManager.storeChangelogTopic(applicationId, supplier.name()); final InternalTopicConfig internalTopicConfig = createInternalTopicConfig(supplier, name); stateChangelogTopics.put(name, internalTopicConfig); } } } if (!sourceTopics.isEmpty()) { topicGroups.put(entry.getKey(), new TopicsInfo( Collections.unmodifiableSet(sinkTopics), Collections.unmodifiableSet(sourceTopics), Collections.unmodifiableMap(internalSourceTopics), Collections.unmodifiableMap(stateChangelogTopics))); } } return 
Collections.unmodifiableMap(topicGroups); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized Map<Integer, TopicsInfo> topicGroups() { final Map<Integer, TopicsInfo> topicGroups = new LinkedHashMap<>(); if (nodeGroups == null) nodeGroups = makeNodeGroups(); for (Map.Entry<Integer, Set<String>> entry : nodeGroups.entrySet()) { final Set<String> sinkTopics = new HashSet<>(); final Set<String> sourceTopics = new HashSet<>(); final Map<String, InternalTopicConfig> internalSourceTopics = new HashMap<>(); final Map<String, InternalTopicConfig> stateChangelogTopics = new HashMap<>(); for (String node : entry.getValue()) { final List<String> topics = nodeToSourceTopics.get(node); if (topics != null) { for (String topic : topics) { if (globalTopics.contains(topic)) { continue; } if (this.internalTopicNames.contains(topic)) { final String internalTopic = decorateTopic(topic); internalSourceTopics.put(internalTopic, new InternalTopicConfig(internalTopic, Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap())); sourceTopics.add(internalTopic); } else { sourceTopics.add(topic); } } } final String topic = nodeToSinkTopic.get(node); if (topic != null) { if (internalTopicNames.contains(topic)) { sinkTopics.add(decorateTopic(topic)); } else { sinkTopics.add(topic); } } for (StateStoreFactory stateFactory : stateFactories.values()) { final StateStoreSupplier supplier = stateFactory.supplier; if (supplier.loggingEnabled() && stateFactory.users.contains(node)) { final String name = ProcessorStateManager.storeChangelogTopic(applicationId, supplier.name()); final InternalTopicConfig internalTopicConfig = createInternalTopicConfig(supplier, name); stateChangelogTopics.put(name, internalTopicConfig); } } } if (!sourceTopics.isEmpty()) { topicGroups.put(entry.getKey(), new TopicsInfo( Collections.unmodifiableSet(sinkTopics), Collections.unmodifiableSet(sourceTopics), Collections.unmodifiableMap(internalSourceTopics), Collections.unmodifiableMap(stateChangelogTopics))); } } return 
Collections.unmodifiableMap(topicGroups); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized Map<Integer, TopicsInfo> topicGroups() { final Map<Integer, TopicsInfo> topicGroups = new LinkedHashMap<>(); if (nodeGroups == null) nodeGroups = makeNodeGroups(); for (Map.Entry<Integer, Set<String>> entry : nodeGroups.entrySet()) { final Set<String> sinkTopics = new HashSet<>(); final Set<String> sourceTopics = new HashSet<>(); final Map<String, InternalTopicConfig> internalSourceTopics = new HashMap<>(); final Map<String, InternalTopicConfig> stateChangelogTopics = new HashMap<>(); for (String node : entry.getValue()) { final List<String> topics = nodeToSourceTopics.get(node); if (topics != null) { for (String topic : topics) { if (globalTopics.contains(topic)) { continue; } if (this.internalTopicNames.contains(topic)) { final String internalTopic = decorateTopic(topic); internalSourceTopics.put(internalTopic, new InternalTopicConfig(internalTopic, Collections.singleton(InternalTopicConfig.CleanupPolicy.delete), Collections.<String, String>emptyMap())); sourceTopics.add(internalTopic); } else { sourceTopics.add(topic); } } } final String topic = nodeToSinkTopic.get(node); if (topic != null) { if (internalTopicNames.contains(topic)) { sinkTopics.add(decorateTopic(topic)); } else { sinkTopics.add(topic); } } for (StateStoreFactory stateFactory : stateFactories.values()) { final StateStoreSupplier supplier = stateFactory.supplier; if (supplier.loggingEnabled() && stateFactory.users.contains(node)) { final String name = ProcessorStateManager.storeChangelogTopic(applicationId, supplier.name()); final InternalTopicConfig internalTopicConfig = createInternalTopicConfig(supplier, name); stateChangelogTopics.put(name, internalTopicConfig); } } } if (!sourceTopics.isEmpty()) { topicGroups.put(entry.getKey(), new TopicsInfo( Collections.unmodifiableSet(sinkTopics), Collections.unmodifiableSet(sourceTopics), Collections.unmodifiableMap(internalSourceTopics), Collections.unmodifiableMap(stateChangelogTopics))); } } return 
Collections.unmodifiableMap(topicGroups); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test public void testBuild() { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource("source-1", "topic-1", "topic-1x"); builder.addSource("source-2", "topic-2"); builder.addSource("source-3", "topic-3"); builder.addSource("source-4", "topic-4"); builder.addSource("source-5", "topic-5"); builder.addProcessor("processor-1", new MockProcessorSupplier(), "source-1"); builder.addProcessor("processor-2", new MockProcessorSupplier(), "source-2", "processor-1"); builder.addProcessor("processor-3", new MockProcessorSupplier(), "source-3", "source-4"); builder.setApplicationId("X"); ProcessorTopology topology0 = builder.build(0); ProcessorTopology topology1 = builder.build(1); ProcessorTopology topology2 = builder.build(2); assertEquals(mkSet("source-1", "source-2", "processor-1", "processor-2"), nodeNames(topology0.processors())); assertEquals(mkSet("source-3", "source-4", "processor-3"), nodeNames(topology1.processors())); assertEquals(mkSet("source-5"), nodeNames(topology2.processors())); }
|
public synchronized ProcessorTopology build(final Integer topicGroupId) { Set<String> nodeGroup; if (topicGroupId != null) { nodeGroup = nodeGroups().get(topicGroupId); } else { final Set<String> globalNodeGroups = globalNodeGroups(); final Collection<Set<String>> values = nodeGroups().values(); nodeGroup = new HashSet<>(); for (Set<String> value : values) { nodeGroup.addAll(value); } nodeGroup.removeAll(globalNodeGroups); } return build(nodeGroup); }
|
TopologyBuilder { public synchronized ProcessorTopology build(final Integer topicGroupId) { Set<String> nodeGroup; if (topicGroupId != null) { nodeGroup = nodeGroups().get(topicGroupId); } else { final Set<String> globalNodeGroups = globalNodeGroups(); final Collection<Set<String>> values = nodeGroups().values(); nodeGroup = new HashSet<>(); for (Set<String> value : values) { nodeGroup.addAll(value); } nodeGroup.removeAll(globalNodeGroups); } return build(nodeGroup); } }
|
TopologyBuilder { public synchronized ProcessorTopology build(final Integer topicGroupId) { Set<String> nodeGroup; if (topicGroupId != null) { nodeGroup = nodeGroups().get(topicGroupId); } else { final Set<String> globalNodeGroups = globalNodeGroups(); final Collection<Set<String>> values = nodeGroups().values(); nodeGroup = new HashSet<>(); for (Set<String> value : values) { nodeGroup.addAll(value); } nodeGroup.removeAll(globalNodeGroups); } return build(nodeGroup); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized ProcessorTopology build(final Integer topicGroupId) { Set<String> nodeGroup; if (topicGroupId != null) { nodeGroup = nodeGroups().get(topicGroupId); } else { final Set<String> globalNodeGroups = globalNodeGroups(); final Collection<Set<String>> values = nodeGroups().values(); nodeGroup = new HashSet<>(); for (Set<String> value : values) { nodeGroup.addAll(value); } nodeGroup.removeAll(globalNodeGroups); } return build(nodeGroup); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized ProcessorTopology build(final Integer topicGroupId) { Set<String> nodeGroup; if (topicGroupId != null) { nodeGroup = nodeGroups().get(topicGroupId); } else { final Set<String> globalNodeGroups = globalNodeGroups(); final Collection<Set<String>> values = nodeGroups().values(); nodeGroup = new HashSet<>(); for (Set<String> value : values) { nodeGroup.addAll(value); } nodeGroup.removeAll(globalNodeGroups); } return build(nodeGroup); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NullPointerException.class) public void shouldNotAllowNullNameWhenAddingSink() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addSink(null, "topic"); }
|
public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NullPointerException.class) public void shouldNotAllowNullTopicWhenAddingSink() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addSink("name", null); }
|
public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames) { return addSink(name, topic, null, null, parentNames); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NullPointerException.class) public void shouldNotAllowNullNameWhenAddingProcessor() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addProcessor(null, new ProcessorSupplier() { @Override public Processor get() { return null; } }); }
|
public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NullPointerException.class) public void shouldNotAllowNullProcessorSupplier() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addProcessor("name", null); }
|
public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames) { Objects.requireNonNull(name, "name must not be null"); Objects.requireNonNull(supplier, "supplier must not be null"); if (nodeFactories.containsKey(name)) throw new TopologyBuilderException("Processor " + name + " is already added."); for (final String parent : parentNames) { if (parent.equals(name)) { throw new TopologyBuilderException("Processor " + name + " cannot be a parent of itself."); } if (!nodeFactories.containsKey(parent)) { throw new TopologyBuilderException("Parent processor " + parent + " is not added yet."); } } nodeFactories.put(name, new ProcessorNodeFactory(name, parentNames, supplier)); nodeGrouper.add(name); nodeGrouper.unite(name, parentNames); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... 
topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NullPointerException.class) public void shouldNotAllowNullNameWhenAddingSource() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addSource(null, Pattern.compile(".*")); }
|
public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addSource(final String name, final String... topics) { return addSource(null, name, null, null, null, topics); } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NullPointerException.class)
public void shouldNotAllowNullProcessorNameWhenConnectingProcessorAndStateStores() throws Exception {
    // A null processor name must be rejected eagerly with an NPE.
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.connectProcessorAndStateStores(null, "store");
}
|
/**
 * Connects the named processor to the given state stores so it can access them at runtime.
 *
 * @param processorName   name of an already-added processor; must not be null
 * @param stateStoreNames names of the state stores to connect; a null array is treated as "none"
 * @return this builder, for chaining
 * @throws NullPointerException if {@code processorName} is null
 */
public synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames) {
    Objects.requireNonNull(processorName, "processorName can't be null");
    // Varargs may legitimately be passed as an explicit null array; skip the loop in that case.
    if (stateStoreNames == null) {
        return this;
    }
    for (final String storeName : stateStoreNames) {
        connectProcessorAndStateStore(processorName, storeName);
    }
    return this;
}
|
TopologyBuilder { public synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames) { Objects.requireNonNull(processorName, "processorName can't be null"); if (stateStoreNames != null) { for (String stateStoreName : stateStoreNames) { connectProcessorAndStateStore(processorName, stateStoreName); } } return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames) { Objects.requireNonNull(processorName, "processorName can't be null"); if (stateStoreNames != null) { for (String stateStoreName : stateStoreNames) { connectProcessorAndStateStore(processorName, stateStoreName); } } return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames) { Objects.requireNonNull(processorName, "processorName can't be null"); if (stateStoreNames != null) { for (String stateStoreName : stateStoreNames) { connectProcessorAndStateStore(processorName, stateStoreName); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames) { Objects.requireNonNull(processorName, "processorName can't be null"); if (stateStoreNames != null) { for (String stateStoreName : stateStoreNames) { connectProcessorAndStateStore(processorName, stateStoreName); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NullPointerException.class)
public void shouldNotAddNullInternalTopic() throws Exception {
    // Registering a null internal topic name must fail fast with an NPE.
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.addInternalTopic(null);
}
|
/**
 * Registers a topic name as internal to this topology (managed by Streams rather than the user).
 *
 * @param topicName the internal topic to register; must not be null
 * @return this builder, for chaining
 * @throws NullPointerException if {@code topicName} is null
 */
public synchronized final TopologyBuilder addInternalTopic(final String topicName) {
    Objects.requireNonNull(topicName, "topicName can't be null");
    internalTopicNames.add(topicName);
    return this;
}
|
TopologyBuilder { public synchronized final TopologyBuilder addInternalTopic(final String topicName) { Objects.requireNonNull(topicName, "topicName can't be null"); this.internalTopicNames.add(topicName); return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder addInternalTopic(final String topicName) { Objects.requireNonNull(topicName, "topicName can't be null"); this.internalTopicNames.add(topicName); return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addInternalTopic(final String topicName) { Objects.requireNonNull(topicName, "topicName can't be null"); this.internalTopicNames.add(topicName); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addInternalTopic(final String topicName) { Objects.requireNonNull(topicName, "topicName can't be null"); this.internalTopicNames.add(topicName); return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NullPointerException.class)
public void shouldNotSetApplicationIdToNull() throws Exception {
    // A null application id must be rejected eagerly with an NPE.
    final TopologyBuilder topologyBuilder = new TopologyBuilder();
    topologyBuilder.setApplicationId(null);
}
|
public synchronized final TopologyBuilder setApplicationId(final String applicationId) { Objects.requireNonNull(applicationId, "applicationId can't be null"); this.applicationId = applicationId; return this; }
|
TopologyBuilder { public synchronized final TopologyBuilder setApplicationId(final String applicationId) { Objects.requireNonNull(applicationId, "applicationId can't be null"); this.applicationId = applicationId; return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder setApplicationId(final String applicationId) { Objects.requireNonNull(applicationId, "applicationId can't be null"); this.applicationId = applicationId; return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder setApplicationId(final String applicationId) { Objects.requireNonNull(applicationId, "applicationId can't be null"); this.applicationId = applicationId; return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder setApplicationId(final String applicationId) { Objects.requireNonNull(applicationId, "applicationId can't be null"); this.applicationId = applicationId; return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
@Test(expected = NotFoundException.class) public void testGetConnectorTaskConfigsConnectorNotFound() throws Throwable { final Capture<Callback<List<TaskInfo>>> cb = Capture.newInstance(); herder.taskConfigs(EasyMock.eq(CONNECTOR_NAME), EasyMock.capture(cb)); expectAndCallbackException(cb, new NotFoundException("connector not found")); PowerMock.replayAll(); connectorsResource.getTaskConfigs(CONNECTOR_NAME, FORWARD); PowerMock.verifyAll(); }
|
@GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); }
|
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } }
|
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } ConnectorsResource(Herder herder); }
|
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }
|
ConnectorsResource { @GET @Path("/{connector}/tasks") public List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector, final @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback<List<TaskInfo>> cb = new FutureCallback<>(); herder.taskConfigs(connector, cb); return completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks", "GET", null, new TypeReference<List<TaskInfo>>() { }, forward); } ConnectorsResource(Herder herder); @GET @Path("/") Collection<String> listConnectors(final @QueryParam("forward") Boolean forward); @POST @Path("/") Response createConnector(final @QueryParam("forward") Boolean forward,
final CreateConnectorRequest createRequest); @GET @Path("/{connector}") ConnectorInfo getConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/config") Map<String, String> getConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @GET @Path("/{connector}/status") ConnectorStateInfo getConnectorStatus(final @PathParam("connector") String connector); @PUT @Path("/{connector}/config") Response putConnectorConfig(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final Map<String, String> connectorConfig); @POST @Path("/{connector}/restart") void restartConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @PUT @Path("/{connector}/pause") Response pauseConnector(@PathParam("connector") String connector); @PUT @Path("/{connector}/resume") Response resumeConnector(@PathParam("connector") String connector); @GET @Path("/{connector}/tasks") List<TaskInfo> getTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); @POST @Path("/{connector}/tasks") void putTaskConfigs(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward,
final List<Map<String, String>> taskConfigs); @GET @Path("/{connector}/tasks/{task}/status") ConnectorStateInfo.TaskState getTaskStatus(final @PathParam("connector") String connector,
final @PathParam("task") Integer task); @POST @Path("/{connector}/tasks/{task}/restart") void restartTask(final @PathParam("connector") String connector,
final @PathParam("task") Integer task,
final @QueryParam("forward") Boolean forward); @DELETE @Path("/{connector}") void destroyConnector(final @PathParam("connector") String connector,
final @QueryParam("forward") Boolean forward); }
|
@Test(expected = NullPointerException.class) public void shouldNotAddNullStateStoreSupplier() throws Exception { final TopologyBuilder builder = new TopologyBuilder(); builder.addStateStore(null); }
|
public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
TopologyBuilder { public synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames) { Objects.requireNonNull(supplier, "supplier can't be null"); if (stateFactories.containsKey(supplier.name())) { throw new TopologyBuilderException("StateStore " + supplier.name() + " is already added."); } stateFactories.put(supplier.name(), new StateStoreFactory(supplier)); if (processorNames != null) { for (String processorName : processorNames) { connectProcessorAndStateStore(processorName, supplier.name()); } } return this; } TopologyBuilder(); synchronized final TopologyBuilder setApplicationId(final String applicationId); synchronized final TopologyBuilder addSource(final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final String... topics); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final String... topics); synchronized final TopologyBuilder addSource(final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset, final TimestampExtractor timestampExtractor, final String name, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final String... topics); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final String... topics); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized TopologyBuilder addGlobalStore(final StateStoreSupplier<KeyValueStore> storeSupplier,
final String sourceName,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valueDeserializer,
final String topic,
final String processorName,
final ProcessorSupplier stateUpdateSupplier); synchronized final TopologyBuilder addSource(final String name, final Deserializer keyDeserializer, final Deserializer valDeserializer, final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final TimestampExtractor timestampExtractor,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSource(final AutoOffsetReset offsetReset,
final String name,
final Deserializer keyDeserializer,
final Deserializer valDeserializer,
final Pattern topicPattern); synchronized final TopologyBuilder addSink(final String name, final String topic, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final StreamPartitioner partitioner, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer keySerializer, final Serializer valSerializer, final String... parentNames); synchronized final TopologyBuilder addSink(final String name, final String topic, final Serializer<K> keySerializer, final Serializer<V> valSerializer, final StreamPartitioner<? super K, ? super V> partitioner, final String... parentNames); synchronized final TopologyBuilder addProcessor(final String name, final ProcessorSupplier supplier, final String... parentNames); synchronized final TopologyBuilder addStateStore(final StateStoreSupplier supplier, final String... processorNames); synchronized final TopologyBuilder connectProcessorAndStateStores(final String processorName, final String... stateStoreNames); synchronized final TopologyBuilder connectProcessors(final String... 
processorNames); synchronized final TopologyBuilder addInternalTopic(final String topicName); synchronized final TopologyBuilder copartitionSources(final Collection<String> sourceNodes); synchronized Map<Integer, Set<String>> nodeGroups(); synchronized ProcessorTopology build(final Integer topicGroupId); synchronized ProcessorTopology buildGlobalStateTopology(); Map<String, StateStore> globalStateStores(); synchronized Map<Integer, TopicsInfo> topicGroups(); synchronized Pattern earliestResetTopicsPattern(); synchronized Pattern latestResetTopicsPattern(); Map<String, List<String>> stateStoreNameToSourceTopics(); synchronized Collection<Set<String>> copartitionGroups(); SubscriptionUpdates subscriptionUpdates(); synchronized Pattern sourceTopicPattern(); synchronized void updateSubscriptions(final SubscriptionUpdates subscriptionUpdates, final String threadId); }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.