focal_method: string (lengths 13 to 60.9k)
test_case: string (lengths 25 to 109k)
public static <T> AvroCoder<T> reflect(TypeDescriptor<T> type) { return reflect((Class<T>) type.getRawType()); }
@Test public void testPojoEncoding() throws Exception { Pojo value = new Pojo("Hello", 42, DATETIME_A); AvroCoder<Pojo> coder = AvroCoder.reflect(Pojo.class); CoderProperties.coderDecodeEncodeEqual(coder, value); }
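For readers unfamiliar with Beam coders, the property exercised by CoderProperties.coderDecodeEncodeEqual can be spelled out by hand. Below is a minimal sketch using Beam's public Coder API; the Pojo here is a locally defined stand-in, not the test's own fixture:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.util.Objects;
import org.apache.beam.sdk.coders.AvroCoder;

public class AvroRoundTripSketch {
    // Stand-in for the test's Pojo fixture: a bean with a no-arg constructor
    // and value equality, which Avro's reflect-based encoding expects.
    static class Pojo {
        String text;
        int count;
        Pojo() { }
        Pojo(String text, int count) { this.text = text; this.count = count; }
        @Override public boolean equals(Object o) {
            return o instanceof Pojo && Objects.equals(text, ((Pojo) o).text) && count == ((Pojo) o).count;
        }
        @Override public int hashCode() { return Objects.hash(text, count); }
    }

    public static void main(String[] args) throws Exception {
        AvroCoder<Pojo> coder = AvroCoder.of(Pojo.class); // reflect-based coder
        Pojo original = new Pojo("Hello", 42);

        ByteArrayOutputStream out = new ByteArrayOutputStream();
        coder.encode(original, out); // serialize via Avro reflection
        Pojo decoded = coder.decode(new ByteArrayInputStream(out.toByteArray()));

        // CoderProperties.coderDecodeEncodeEqual asserts exactly this equality.
        if (!original.equals(decoded)) {
            throw new AssertionError("round trip changed the value");
        }
    }
}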
@Override public ExecuteResult execute(final ServiceContext serviceContext, final ConfiguredKsqlPlan plan, final boolean restoreInProgress) { try { final ExecuteResult result = EngineExecutor .create(primaryContext, serviceContext, plan.getConfig()) .execute(plan.getPlan(), restoreInProgress); return result; } catch (final KsqlStatementException e) { throw e; } catch (final KsqlException e) { // add the statement text to the KsqlException throw new KsqlStatementException( e.getMessage(), e.getMessage(), plan.getPlan().getStatementText(), e.getCause() ); } }
@Test public void shouldExecuteInsertIntoStream() { // Given: setupKsqlEngineWithSharedRuntimeEnabled(); KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, "create stream bar as select * from orders;", ksqlConfig, Collections.emptyMap() ); // When: final List<QueryMetadata> queries = KsqlEngineTestUtil.execute( serviceContext, ksqlEngine, "insert into bar select * from orders;", ksqlConfig, Collections.emptyMap() ); // Then: assertThat(queries, hasSize(1)); }
public static FlinkJobServerDriver fromParams(String[] args) { return fromConfig(parseArgs(args)); }
@Test(timeout = 30_000) public void testJobServerDriver() throws Exception { FlinkJobServerDriver driver = null; Thread driverThread = null; final PrintStream oldErr = System.err; ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintStream newErr = new PrintStream(baos); try { System.setErr(newErr); driver = FlinkJobServerDriver.fromParams( new String[] {"--job-port=0", "--artifact-port=0", "--expansion-port=0"}); driverThread = new Thread(driver); driverThread.start(); boolean success = false; while (!success) { newErr.flush(); String output = baos.toString(StandardCharsets.UTF_8.name()); if (output.contains("JobService started on localhost:") && output.contains("ArtifactStagingService started on localhost:") && output.contains("ExpansionService started on localhost:")) { success = true; } else { Thread.sleep(100); } } assertThat(driver.getJobServerUrl(), is(not(nullValue()))); assertThat( baos.toString(StandardCharsets.UTF_8.name()), containsString(driver.getJobServerUrl())); assertThat(driverThread.isAlive(), is(true)); } catch (Throwable t) { // restore to print exception System.setErr(oldErr); throw t; } finally { System.setErr(oldErr); if (driver != null) { driver.stop(); } if (driverThread != null) { driverThread.interrupt(); driverThread.join(); } } }
@Override public void startAsync() throws Exception { doAction(Executable::startAsync); }
@Test public void shouldStartAll() throws Exception { // When: multiExecutable.startAsync(); // Then: final InOrder inOrder = Mockito.inOrder(executable1, executable2); inOrder.verify(executable1).startAsync(); inOrder.verify(executable2).startAsync(); inOrder.verifyNoMoreInteractions(); }
@Override public InitialContextFactory createInitialContextFactory(Hashtable<?, ?> environment) throws NamingException { String className = environment != null ? (String) environment.get(Context.INITIAL_CONTEXT_FACTORY) : null; if (className == null) { return new ServerInitialContextFactory(namedObjects); } if (initialContextFactories.containsKey(className)) { return initialContextFactories.get(className); } else { return InitialContextFactory.class.cast(Util.getInstance(className, Thread.currentThread().getContextClassLoader())); } }
@Test public void testNoIllegalAccessJDKInitialContextFactories() throws NamingException { ServerInitialContextFactoryBuilder builder = new ServerInitialContextFactoryBuilder(); Properties env = new Properties(); env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.dns.DnsContextFactory"); InitialContextFactory contextFactory = builder.createInitialContextFactory(env); }
Iterator<String> iterator() { if (serverUrls.isEmpty()) { LOGGER.error("[{}] [iterator-serverlist] No server address defined!", name); } return new ServerAddressIterator(serverUrls); }
@Test void testIterator() { List<String> addrs = new ArrayList<>(); String addr = "1.1.1.1:8848"; addrs.add(addr); final ServerListManager mgr = new ServerListManager(addrs, "aaa"); // new iterator final Iterator<String> it = mgr.iterator(); assertTrue(it.hasNext()); assertEquals(addr, it.next()); assertNull(mgr.getIterator()); mgr.refreshCurrentServerAddr(); assertNotNull(mgr.getIterator()); final String currentServerAddr = mgr.getCurrentServerAddr(); assertEquals(addr, currentServerAddr); final String nextServerAddr = mgr.getNextServerAddr(); assertEquals(addr, nextServerAddr); final Iterator<String> iterator1 = mgr.iterator(); assertTrue(iterator1.hasNext()); }
@Override public Row projectColumnsToWrite(Row in) { return partitionIndexes.length == 0 ? in : Row.project(in, nonPartitionIndexes); }
@Test void testProjectColumnsToWrite() { Row projected1 = new RowPartitionComputer( "", new String[] {"f1", "p1", "p2", "f2"}, new String[] {"p1", "p2"}) .projectColumnsToWrite(Row.of(1, 2, 3, 4)); assertThat(projected1).isEqualTo(Row.of(1, 4)); Row projected2 = new RowPartitionComputer( "", new String[] {"f1", "f2", "p1", "p2"}, new String[] {"p1", "p2"}) .projectColumnsToWrite(Row.of(1, 2, 3, 4)); assertThat(projected2).isEqualTo(Row.of(1, 2)); Row projected3 = new RowPartitionComputer( "", new String[] {"f1", "p1", "f2", "p2"}, new String[] {"p1", "p2"}) .projectColumnsToWrite(Row.of(1, 2, 3, 4)); assertThat(projected3).isEqualTo(Row.of(1, 3)); }
@Override public void sendSmsCode(SmsCodeSendReqDTO reqDTO) { SmsSceneEnum sceneEnum = SmsSceneEnum.getCodeByScene(reqDTO.getScene()); Assert.notNull(sceneEnum, "验证码场景({}) 查找不到配置", reqDTO.getScene()); // create the verification code String code = createSmsCode(reqDTO.getMobile(), reqDTO.getScene(), reqDTO.getCreateIp()); // send the verification code smsSendService.sendSingleSms(reqDTO.getMobile(), null, null, sceneEnum.getTemplateCode(), MapUtil.of("code", code)); }
@Test public void sendSmsCode_tooFast() { // mock data SmsCodeDO smsCodeDO = randomPojo(SmsCodeDO.class, o -> o.setMobile("15601691300").setTodayIndex(1)); smsCodeMapper.insert(smsCodeDO); // prepare the request SmsCodeSendReqDTO reqDTO = randomPojo(SmsCodeSendReqDTO.class, o -> { o.setMobile("15601691300"); o.setScene(SmsSceneEnum.MEMBER_LOGIN.getScene()); }); // mock behaviour SqlConstants.init(DbType.MYSQL); // call and assert the expected exception assertServiceException(() -> smsCodeService.sendSmsCode(reqDTO), SMS_CODE_SEND_TOO_FAST); }
@Override public int read() throws IOException { checkStream(); return (read(oneByte, 0, oneByte.length) == -1) ? -1 : (oneByte[0] & 0xff); }
@Test public void testReadBuffer() throws IOException { // 32 buf.length < 52 TEST_STRING.length() byte[] buf = new byte[32]; int bytesToRead = TEST_STRING.length(); int i = 0; while (bytesToRead > 0) { int n = Math.min(bytesToRead, buf.length); int bytesRead = decompressorStream.read(buf, 0, n); assertTrue(bytesRead > 0 && bytesRead <= n); assertThat(new String(buf, 0, bytesRead)) .isEqualTo(TEST_STRING.substring(i, i + bytesRead)); bytesToRead = bytesToRead - bytesRead; i = i + bytesRead; } try { int ret = decompressorStream.read(buf, 0, buf.length); fail("Not reachable but got ret " + ret); } catch (EOFException e) { // Expect EOF exception } }
public void matches(@Nullable String regex) { checkNotNull(regex); if (actual == null) { failWithActual("expected a string that matches", regex); } else if (!actual.matches(regex)) { if (regex.equals(actual)) { failWithoutActual( fact("expected to match", regex), fact("but was", actual), simpleFact("Looks like you want to use .isEqualTo() for an exact equality assertion.")); } else if (Platform.containsMatch(actual, regex)) { failWithoutActual( fact("expected to match", regex), fact("but was", actual), simpleFact("Did you mean to call containsMatch() instead of match()?")); } else { failWithActual("expected to match", regex); } } }
@Test public void stringMatchesStringFailNull() { expectFailureWhenTestingThat(null).matches(".*aaa.*"); assertFailureValue("expected a string that matches", ".*aaa.*"); }
public static int getLinkCount(File fileName) throws IOException { if (fileName == null) { throw new IOException( "invalid argument to getLinkCount: file name is null"); } if (!fileName.exists()) { throw new FileNotFoundException(fileName + " not found."); } if (supportsHardLink(fileName)) { return (int) Files.getAttribute(fileName.toPath(), FILE_ATTRIBUTE); } // construct and execute shell command String[] cmd = getHardLinkCommand.linkCount(fileName); String inpMsg = null; String errMsg = null; int exitValue = -1; BufferedReader in = null; ShellCommandExecutor shexec = new ShellCommandExecutor(cmd); try { shexec.execute(); in = new BufferedReader(new StringReader(shexec.getOutput())); inpMsg = in.readLine(); exitValue = shexec.getExitCode(); if (inpMsg == null || exitValue != 0) { throw createIOException(fileName, inpMsg, errMsg, exitValue, null); } if (Shell.SOLARIS) { String[] result = inpMsg.split("\\s+"); return Integer.parseInt(result[1]); } else { return Integer.parseInt(inpMsg); } } catch (ExitCodeException e) { inpMsg = shexec.getOutput(); errMsg = e.getMessage(); exitValue = e.getExitCode(); throw createIOException(fileName, inpMsg, errMsg, exitValue, e); } catch (NumberFormatException e) { throw createIOException(fileName, inpMsg, errMsg, exitValue, e); } finally { IOUtils.closeStream(in); } }
@Test public void testGetLinkCount() throws IOException { //at beginning of world, check that source files have link count "1" //since they haven't been hardlinked yet assertEquals(1, getLinkCount(x1)); assertEquals(1, getLinkCount(x2)); assertEquals(1, getLinkCount(x3)); }
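A fresh file reports a link count of 1 because only one directory entry points at its inode; each hard link adds one. The following is a minimal, Hadoop-independent sketch using the same "unix:nlink" attribute the focal method reads; it assumes a POSIX file system (Linux/macOS), so the shell fallback above is never taken:

import java.nio.file.Files;
import java.nio.file.Path;

public class LinkCountSketch {
    public static void main(String[] args) throws Exception {
        Path original = Files.createTempFile("hardlink-demo", ".dat");
        // "unix:nlink" is the POSIX hard-link count attribute.
        System.out.println(Files.getAttribute(original, "unix:nlink")); // 1
        Path link = original.resolveSibling(original.getFileName() + ".lnk");
        Files.createLink(link, original); // add a second directory entry for the same inode
        System.out.println(Files.getAttribute(original, "unix:nlink")); // 2
        Files.delete(link);
        Files.delete(original);
    }
}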
@Override public void onMsg(TbContext ctx, TbMsg msg) { JsonObject json = JsonParser.parseString(msg.getData()).getAsJsonObject(); String tmp; if (msg.getOriginator().getEntityType() != EntityType.DEVICE) { ctx.tellFailure(msg, new RuntimeException("Message originator is not a device entity!")); } else if (!json.has("method")) { ctx.tellFailure(msg, new RuntimeException("Method is not present in the message!")); } else if (!json.has("params")) { ctx.tellFailure(msg, new RuntimeException("Params are not present in the message!")); } else { int requestId = json.has("requestId") ? json.get("requestId").getAsInt() : random.nextInt(); boolean restApiCall = msg.isTypeOf(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE); tmp = msg.getMetaData().getValue("oneway"); boolean oneway = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp); tmp = msg.getMetaData().getValue(DataConstants.PERSISTENT); boolean persisted = !StringUtils.isEmpty(tmp) && Boolean.parseBoolean(tmp); tmp = msg.getMetaData().getValue("requestUUID"); UUID requestUUID = !StringUtils.isEmpty(tmp) ? UUID.fromString(tmp) : Uuids.timeBased(); tmp = msg.getMetaData().getValue("originServiceId"); String originServiceId = !StringUtils.isEmpty(tmp) ? tmp : null; tmp = msg.getMetaData().getValue(DataConstants.EXPIRATION_TIME); long expirationTime = !StringUtils.isEmpty(tmp) ? Long.parseLong(tmp) : (System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(config.getTimeoutInSeconds())); tmp = msg.getMetaData().getValue(DataConstants.RETRIES); Integer retries = !StringUtils.isEmpty(tmp) ? Integer.parseInt(tmp) : null; String params = parseJsonData(json.get("params")); String additionalInfo = parseJsonData(json.get(DataConstants.ADDITIONAL_INFO)); RuleEngineDeviceRpcRequest request = RuleEngineDeviceRpcRequest.builder() .oneway(oneway) .method(json.get("method").getAsString()) .body(params) .tenantId(ctx.getTenantId()) .deviceId(new DeviceId(msg.getOriginator().getId())) .requestId(requestId) .requestUUID(requestUUID) .originServiceId(originServiceId) .expirationTime(expirationTime) .retries(retries) .restApiCall(restApiCall) .persisted(persisted) .additionalInfo(additionalInfo) .build(); ctx.getRpcService().sendRpcRequestToDevice(request, ruleEngineDeviceRpcResponse -> { if (ruleEngineDeviceRpcResponse.getError().isEmpty()) { TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), ruleEngineDeviceRpcResponse.getResponse().orElse(TbMsg.EMPTY_JSON_OBJECT)); ctx.enqueueForTellNext(next, TbNodeConnectionType.SUCCESS); } else { TbMsg next = ctx.newMsg(msg.getQueueName(), msg.getType(), msg.getOriginator(), msg.getCustomerId(), msg.getMetaData(), wrap("error", ruleEngineDeviceRpcResponse.getError().get().name())); ctx.enqueueForTellFailure(next, ruleEngineDeviceRpcResponse.getError().get().name()); } }); ctx.ack(msg); } }
@Test public void givenRequestUUID_whenOnMsg_thenVerifyRequest() { given(ctxMock.getRpcService()).willReturn(rpcServiceMock); given(ctxMock.getTenantId()).willReturn(TENANT_ID); String requestUUID = "b795a241-5a30-48fb-92d5-46b864d47130"; TbMsgMetaData metadata = new TbMsgMetaData(); metadata.putValue("requestUUID", requestUUID); TbMsg msg = TbMsg.newMsg(TbMsgType.RPC_CALL_FROM_SERVER_TO_DEVICE, DEVICE_ID, metadata, MSG_DATA); node.onMsg(ctxMock, msg); ArgumentCaptor<RuleEngineDeviceRpcRequest> requestCaptor = captureRequest(); assertThat(requestCaptor.getValue().getRequestUUID()).isEqualTo(UUID.fromString(requestUUID)); }
@PublicAPI(usage = ACCESS) public static ArchRule testClassesShouldResideInTheSamePackageAsImplementation() { return testClassesShouldResideInTheSamePackageAsImplementation("Test"); }
@Test public void test_class_in_same_package_should_fail_when_test_class_reside_in_different_package_as_implementation() { assertThatRule(testClassesShouldResideInTheSamePackageAsImplementation()) .checking(new ClassFileImporter().importPackagesOf(ImplementationClassWithWrongTestClassPackage.class)) .hasOnlyOneViolationWithStandardPattern(ImplementationClassWithWrongTestClassPackageTest.class, "does not reside in same package as implementation class <" + ImplementationClassWithWrongTestClassPackage.class.getName() + ">"); }
public static <T> TimeLimiterOperator<T> of(TimeLimiter timeLimiter) { return new TimeLimiterOperator<>(timeLimiter); }
@Test public void doNotTimeoutUsingFlux() { given(timeLimiter.getTimeLimiterConfig()) .willReturn(toConfig(Duration.ofMinutes(1))); Flux<?> flux = Flux.interval(Duration.ofMillis(1), Schedulers.single()) .take(2) .transformDeferred(TimeLimiterOperator.of(timeLimiter)); StepVerifier.create(flux) .expectNextCount(2) .verifyComplete(); then(timeLimiter).should(times(3)) .onSuccess(); }
public <E extends Enum<E>> void logControlSessionStateChange( final E oldState, final E newState, final long controlSessionId) { final int length = sessionStateChangeLength(oldState, newState); final int captureLength = captureLength(length); final int encodedLength = encodedLength(captureLength); final ManyToOneRingBuffer ringBuffer = this.ringBuffer; final int index = ringBuffer.tryClaim(CONTROL_SESSION_STATE_CHANGE.toEventCodeId(), encodedLength); if (index > 0) { try { encodeSessionStateChange( (UnsafeBuffer)ringBuffer.buffer(), index, captureLength, length, oldState, newState, controlSessionId ); } finally { ringBuffer.commit(index); } } }
@Test void logControlSessionStateChange() { final int offset = ALIGNMENT * 4; logBuffer.putLong(CAPACITY + TAIL_POSITION_OFFSET, offset); final ChronoUnit from = ChronoUnit.CENTURIES; final ChronoUnit to = ChronoUnit.MICROS; final long id = 555_000_000_000L; final String payload = from.name() + STATE_SEPARATOR + to.name(); final int captureLength = SIZE_OF_LONG + SIZE_OF_INT + payload.length(); logger.logControlSessionStateChange(from, to, id); verifyLogHeader( logBuffer, offset, CONTROL_SESSION_STATE_CHANGE.toEventCodeId(), captureLength, captureLength); assertEquals(id, logBuffer.getLong(encodedMsgOffset(offset + LOG_HEADER_LENGTH), LITTLE_ENDIAN)); assertEquals( payload, logBuffer.getStringAscii(encodedMsgOffset(offset + LOG_HEADER_LENGTH + SIZE_OF_LONG))); }
@Override public void start() throws Exception { if (!state.compareAndSet(State.LATENT, State.STARTED)) { throw new IllegalStateException(); } try { client.create().creatingParentContainersIfNeeded().forPath(queuePath); } catch (KeeperException.NodeExistsException ignore) { // this is OK } if (lockPath != null) { try { client.create().creatingParentContainersIfNeeded().forPath(lockPath); } catch (KeeperException.NodeExistsException ignore) { // this is OK } } if (!isProducerOnly || (maxItems != QueueBuilder.NOT_SET)) { childrenCache.start(); } if (!isProducerOnly) { service.submit(new Callable<Object>() { @Override public Object call() { runLoop(); return null; } }); } }
@Test public void testSafetyWithCrash() throws Exception { final int itemQty = 100; DistributedQueue<TestQueueItem> producerQueue = null; DistributedQueue<TestQueueItem> consumerQueue1 = null; DistributedQueue<TestQueueItem> consumerQueue2 = null; CuratorFramework producerClient = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1)); CuratorFramework consumerClient1 = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1)); CuratorFramework consumerClient2 = CuratorFrameworkFactory.newClient(server.getConnectString(), new RetryOneTime(1)); try { producerClient.start(); consumerClient1.start(); consumerClient2.start(); ExecutorService service = Executors.newCachedThreadPool(); // make the producer queue { producerQueue = QueueBuilder.builder(producerClient, null, serializer, QUEUE_PATH) .buildQueue(); producerQueue.start(); QueueTestProducer producer = new QueueTestProducer(producerQueue, itemQty, 0); service.submit(producer); } final Set<TestQueueItem> takenItems = Sets.newTreeSet(); final Set<TestQueueItem> takenItemsForConsumer1 = Sets.newTreeSet(); final Set<TestQueueItem> takenItemsForConsumer2 = Sets.newTreeSet(); final AtomicReference<TestQueueItem> thrownItemFromConsumer1 = new AtomicReference<TestQueueItem>(null); // make the first consumer queue { final QueueConsumer<TestQueueItem> ourQueue = new QueueConsumer<TestQueueItem>() { @Override public void consumeMessage(TestQueueItem message) throws Exception { synchronized (takenItems) { if (takenItems.size() > 10) { thrownItemFromConsumer1.set(message); throw new Exception("dummy"); // simulate a crash } } addToTakenItems(message, takenItems, itemQty); synchronized (takenItemsForConsumer1) { takenItemsForConsumer1.add(message); } Thread.sleep((long) (Math.random() * 5)); } @Override public void stateChanged(CuratorFramework client, ConnectionState newState) {} }; consumerQueue1 = QueueBuilder.builder(consumerClient1, ourQueue, serializer, QUEUE_PATH) .lockPath("/a/locks") .buildQueue(); consumerQueue1.start(); } // make the second consumer queue { final QueueConsumer<TestQueueItem> ourQueue = new QueueConsumer<TestQueueItem>() { @Override public void consumeMessage(TestQueueItem message) throws Exception { addToTakenItems(message, takenItems, itemQty); synchronized (takenItemsForConsumer2) { takenItemsForConsumer2.add(message); } Thread.sleep((long) (Math.random() * 5)); } @Override public void stateChanged(CuratorFramework client, ConnectionState newState) {} }; consumerQueue2 = QueueBuilder.builder(consumerClient2, ourQueue, serializer, QUEUE_PATH) .lockPath("/a/locks") .buildQueue(); consumerQueue2.start(); } synchronized (takenItems) { while (takenItems.size() < itemQty) { takenItems.wait(1000); } } int i = 0; for (TestQueueItem item : takenItems) { assertEquals(item.str, Integer.toString(i++)); } assertNotNull(thrownItemFromConsumer1.get()); assertTrue((takenItemsForConsumer2.contains(thrownItemFromConsumer1.get()))); assertTrue(Sets.intersection(takenItemsForConsumer1, takenItemsForConsumer2) .size() == 0); } finally { CloseableUtils.closeQuietly(producerQueue); CloseableUtils.closeQuietly(consumerQueue1); CloseableUtils.closeQuietly(consumerQueue2); CloseableUtils.closeQuietly(producerClient); CloseableUtils.closeQuietly(consumerClient1); CloseableUtils.closeQuietly(consumerClient2); } }
@Override @Nullable public byte[] readByteArray(@Nonnull String fieldName) throws IOException { return readIncompatibleField(fieldName, BYTE_ARRAY, super::readByteArray); }
@Test(expected = IncompatibleClassChangeError.class) public void testReadByteArray_IncompatibleClass() throws Exception { reader.readByteArray("byte"); }
@Override public KTable<Windowed<K>, V> aggregate(final Initializer<V> initializer) { return aggregate(initializer, Materialized.with(null, null)); }
@Test public void timeWindowAggregateManyWindowsTest() { final KTable<Windowed<String>, String> customers = groupedStream.cogroup(MockAggregator.TOSTRING_ADDER) .windowedBy(TimeWindows.of(ofMillis(500L))).aggregate( MockInitializer.STRING_INIT, Materialized.with(Serdes.String(), Serdes.String())); customers.toStream().to(OUTPUT); try (final TopologyTestDriver driver = new TopologyTestDriver(builder.build(), props)) { final TestInputTopic<String, String> testInputTopic = driver.createInputTopic( TOPIC, new StringSerializer(), new StringSerializer()); final TestOutputTopic<Windowed<String>, String> testOutputTopic = driver.createOutputTopic( OUTPUT, new TimeWindowedDeserializer<>(new StringDeserializer(), WINDOW_SIZE), new StringDeserializer()); testInputTopic.pipeInput("k1", "A", 0); testInputTopic.pipeInput("k2", "A", 499); testInputTopic.pipeInput("k2", "A", 500L); testInputTopic.pipeInput("k1", "A", 500L); assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+A", 0); assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+A", 499); assertOutputKeyValueTimestamp(testOutputTopic, "k2", "0+A", 500); assertOutputKeyValueTimestamp(testOutputTopic, "k1", "0+A", 500); } }
public String getActualFilePath() { return this.actualFilePath; }
@Test public void testGetActualFilePath() { Dependency instance = new Dependency(); String expResult = "file.tar"; instance.setSha1sum("non-null value"); instance.setActualFilePath(expResult); String result = instance.getActualFilePath(); assertEquals(expResult, result); }
@Override public JooqEndpoint getEndpoint() { return (JooqEndpoint) super.getEndpoint(); }
@Test public void testConsumerNoDelete() throws InterruptedException { MockEndpoint mockResult = getMockEndpoint("mock:resultBookStoreRecord"); MockEndpoint mockInserted = getMockEndpoint("mock:insertedBookStoreRecord"); mockResult.expectedMessageCount(1); mockInserted.expectedMessageCount(1); ProducerTemplate producerTemplate = context.createProducerTemplate(); // Insert BookStoreRecord bookStoreRecord = new BookStoreRecord("test"); producerTemplate.sendBody(context.getEndpoint("direct:insertBookStoreRecord"), ExchangePattern.InOut, bookStoreRecord); MockEndpoint.assertIsSatisfied(context); assertEquals(bookStoreRecord, mockInserted.getExchanges().get(0).getMessage().getBody()); assertEquals(1, ((Result) mockResult.getExchanges().get(0).getMessage().getBody()).size()); }
@Override public boolean requiresCleanupOfRecoverableState() { // we can't clean up any state prior to commit // see discussion: https://github.com/apache/flink/pull/15599#discussion_r623127365 return false; }
@Test public void testRequiresCleanupOfRecoverableState() { assertFalse(writer.requiresCleanupOfRecoverableState()); }
public String csv(String text) { if (text == null || text.isEmpty()) { return "\"\""; } final String str = text.trim().replace("\n", " "); if (str.trim().length() == 0) { return "\"\""; } return StringEscapeUtils.escapeCsv(str); }
@Test public void testCsv() { String text = null; EscapeTool instance = new EscapeTool(); String expResult = "\"\""; String result = instance.csv(text); assertEquals(expResult, result); text = ""; expResult = "\"\""; result = instance.csv(text); assertEquals(expResult, result); text = "one, two"; expResult = "\"one, two\""; result = instance.csv(text); assertEquals(expResult, result); }
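The quoting behaviour the test relies on comes from StringEscapeUtils.escapeCsv, which leaves plain values untouched and only quotes values containing a comma, a double quote, or a line break. A minimal sketch, assuming the commons-lang3 flavour of StringEscapeUtils (commons-lang 2.4+ and commons-text expose the same method):

import org.apache.commons.lang3.StringEscapeUtils;

public class CsvEscapeSketch {
    public static void main(String[] args) {
        System.out.println(StringEscapeUtils.escapeCsv("plain"));      // plain (no quoting needed)
        System.out.println(StringEscapeUtils.escapeCsv("one, two"));   // "one, two"
        System.out.println(StringEscapeUtils.escapeCsv("say \"hi\"")); // "say ""hi""" (embedded quotes doubled)
    }
}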
protected long parseTimestamp(final String timestamp) { if(null == timestamp) { return -1; } try { final Date parsed = new MDTMSecondsDateFormatter().parse(timestamp); return parsed.getTime(); } catch(InvalidDateException e) { log.warn("Failed to parse timestamp:" + e.getMessage()); try { final Date parsed = new MDTMMillisecondsDateFormatter().parse(timestamp); return parsed.getTime(); } catch(InvalidDateException f) { log.warn("Failed to parse timestamp:" + f.getMessage()); } } log.error(String.format("Failed to parse timestamp %s", timestamp)); return -1; }
@Test public void testParseTimestampInvalid() { final long time = new FTPMlsdListResponseReader().parseTimestamp("2013"); assertEquals(-1L, time); }
public Map<String, String> getPropertiesWithPrefix(String prefix) { return getPropertiesWithPrefix(prefix, false); }
@Test public void testGetPropertiesWithFullyQualifiedName() { ConfigurationProperties configurationProperties = new ConfigurationProperties(PROPERTIES); Map<String, String> props = configurationProperties .getPropertiesWithPrefix("root.1.2", true); Assert.assertEquals(4, props.size()); Assert.assertTrue(props.containsKey("root.1.2.3")); Assert.assertEquals("TEST_VALUE_1", props.get("root.1.2.3")); Assert.assertTrue(props.containsKey("root.1.2")); Assert.assertEquals("TEST_VALUE_3", props.get("root.1.2")); Assert.assertTrue(props.containsKey("root.1.2.4.5")); Assert.assertEquals("TEST_VALUE_3_2", props.get("root.1.2.4.5")); Assert.assertTrue(props.containsKey("root.1.2.4")); Assert.assertEquals("TEST_VALUE_3_1", props.get("root.1.2.4")); }
public List<CompactionTask> produce() { // get all CF files sorted by key range start (L1+) List<SstFileMetaData> sstSortedByCfAndStartingKeys = metadataSupplier.get().stream() .filter(l -> l.level() > 0) // let RocksDB deal with L0 .sorted(SST_COMPARATOR) .collect(Collectors.toList()); LOG.trace("Input files: {}", sstSortedByCfAndStartingKeys.size()); List<CompactionTask> tasks = groupIntoTasks(sstSortedByCfAndStartingKeys); tasks.sort(Comparator.<CompactionTask>comparingInt(t -> t.files.size()).reversed()); return tasks.subList(0, Math.min(tasks.size(), settings.maxManualCompactions)); }
@Test void testMaxParallelCompactions() { assertThat( produce( configBuilder() .setMaxFilesToCompact(1) .setMaxParallelCompactions(2) .build(), sstBuilder().build(), sstBuilder().build(), sstBuilder().build(), sstBuilder().build())) .hasSize(2); }
public FEELFnResult<Boolean> invoke(@ParameterName("list") List list) { if (list == null) { return FEELFnResult.ofResult(false); } boolean result = false; for (final Object element : list) { if (element != null && !(element instanceof Boolean)) { return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "an element in the list is not" + " a Boolean")); } else { if (element != null) { result |= (Boolean) element; } } } return FEELFnResult.ofResult(result); }
@Test void invokeBooleanParamFalse() { FunctionTestUtil.assertResult(anyFunction.invoke(false), false); }
@Override public double getMean() { if (values.length == 0) { return 0; } double sum = 0; for (long value : values) { sum += value; } return sum / values.length; }
@Test public void calculatesTheMeanValue() throws Exception { assertThat(snapshot.getMean()) .isEqualTo(3.0); }
public static void free(final DirectBuffer buffer) { if (null != buffer) { free(buffer.byteBuffer()); } }
@Test void freeIsANoOpIfDirectBufferIsNull() { BufferUtil.free((DirectBuffer)null); }
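A small usage sketch of this null-tolerant free, assuming Agrona's UnsafeBuffer as the DirectBuffer implementation; freeing a direct buffer releases its off-heap memory immediately instead of waiting for garbage collection:

import java.nio.ByteBuffer;
import org.agrona.BufferUtil;
import org.agrona.DirectBuffer;
import org.agrona.concurrent.UnsafeBuffer;

public class FreeSketch {
    public static void main(String[] args) {
        DirectBuffer direct = new UnsafeBuffer(ByteBuffer.allocateDirect(64));
        BufferUtil.free(direct);              // releases the off-heap allocation now
        BufferUtil.free((DirectBuffer) null); // no-op, as the test asserts
        // A heap-backed buffer has no native memory, so free does nothing here:
        BufferUtil.free(new UnsafeBuffer(new byte[64]));
    }
}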
public ClusterStateBundle.FeedBlock inferContentClusterFeedBlockOrNull(ContentCluster cluster) { if (!feedBlockEnabled) { return null; } var nodeInfos = cluster.getNodeInfos(); var exhaustions = enumerateNodeResourceExhaustionsAcrossAllNodes(nodeInfos); if (exhaustions.isEmpty()) { return null; } int maxDescriptions = 3; String description = exhaustions.stream() .limit(maxDescriptions) .map(NodeResourceExhaustion::toExhaustionAddedDescription) .collect(Collectors.joining(", ")); if (exhaustions.size() > maxDescriptions) { description += String.format(" (... and %d more)", exhaustions.size() - maxDescriptions); } description = decoratedMessage(cluster, description); // FIXME we currently will trigger a cluster state recomputation even if the number of // exhaustions is greater than what is returned as part of the description. Though at // that point, cluster state recomputations will be the least of your worries...! return ClusterStateBundle.FeedBlock.blockedWith(description, exhaustions); }
@Test void missing_or_malformed_rpc_addresses_are_emitted_as_unknown_hostnames() { var calc = new ResourceExhaustionCalculator(true, mapOf(usage("disk", 0.5), usage("memory", 0.8))); var cf = createFixtureWithReportedUsages(forNode(1, usage("disk", 0.51), usage("memory", 0.79)), forNode(2, usage("disk", 0.4), usage("memory", 0.85))); cf.cluster().getNodeInfo(storageNode(1)).setRpcAddress(null); cf.cluster().getNodeInfo(storageNode(2)).setRpcAddress("max mekker"); var feedBlock = calc.inferContentClusterFeedBlockOrNull(cf.cluster()); assertNotNull(feedBlock); assertTrue(feedBlock.blockFeedInCluster()); assertEquals(decorate(cf, "disk on node 1 [unknown hostname] is 51.0% full (the configured limit is 50.0%), " + "memory on node 2 [unknown hostname] is 85.0% full (the configured limit is 80.0%)"), feedBlock.getDescription()); }
@Override public void update(Observable o, Object arg) { if (!(o instanceof NodeListener)) { return; } if (arg == null || !(arg instanceof NodeEvent[])) { return; } NodeEvent[] events = (NodeEvent[]) arg; if (events.length <= 0) { return; } LOG.info("Waiting for Lock to start processing NodeEvents."); lock.lock(); try { LOG.info("Start processing the NodeEvent[" + events.length + "]."); for (NodeEvent e : events) { if (e.getType() == NodeEventTypeEnum.ADD) { addNode(e); } else if (e.getType() == NodeEventTypeEnum.DELETE) { deleteNode(e); } } } catch (Exception e) { LOG.error("Exception occurred while updating Pool.", e); } finally { lock.unlock(); } }
@Test public void testUpdate_onlyOneLeftToRemove() { haDataSource.getDataSourceMap().put("foo", new MockDataSource("foo")); NodeEvent event = new NodeEvent(); event.setNodeName("foo"); event.setType(NodeEventTypeEnum.DELETE); updater.update(new FileNodeListener(), new NodeEvent[]{event}); assertTrue(haDataSource.getDataSourceMap().containsKey("foo")); }
@Override public Integer doCall() throws Exception { JsonObject pluginConfig = loadConfig(); JsonObject plugins = pluginConfig.getMap("plugins"); Optional<PluginType> camelPlugin = PluginType.findByName(name); if (camelPlugin.isPresent()) { if (command == null) { command = camelPlugin.get().getCommand(); } if (description == null) { description = camelPlugin.get().getDescription(); } if (firstVersion == null) { firstVersion = camelPlugin.get().getFirstVersion(); } } if (command == null) { // use plugin name as command command = name; } if (firstVersion == null) { // fallback to version specified firstVersion = version; } JsonObject plugin = new JsonObject(); plugin.put("name", name); plugin.put("command", command); if (firstVersion != null) { plugin.put("firstVersion", firstVersion); } plugin.put("description", description != null ? description : "Plugin %s called with command %s".formatted(name, command)); if (gav == null && (groupId != null && artifactId != null)) { if (version == null) { CamelCatalog catalog = new DefaultCamelCatalog(); version = catalog.getCatalogVersion(); } gav = "%s:%s:%s".formatted(groupId, artifactId, version); } if (gav != null) { plugin.put("dependency", gav); } plugins.put(name, plugin); saveConfig(pluginConfig); return 0; }
@Test public void shouldGenerateProperties() throws Exception { PluginAdd command = new PluginAdd(new CamelJBangMain().withPrinter(printer)); command.name = "foo"; command.doCall(); Assertions.assertEquals("", printer.getOutput()); Assertions.assertEquals("{\"plugins\":{\"foo\":{\"name\":\"foo\",\"command\":\"foo\"," + "\"description\":\"Plugin foo called with command foo\"}}}", PluginHelper.getOrCreatePluginConfig().toJson()); }
@Around(CLIENT_INTERFACE_BATCH_REMOVE_CONFIG) public Object removeConfigByIdsAround(ProceedingJoinPoint pjp, HttpServletRequest request, List<Long> ids) throws Throwable { final ConfigChangePointCutTypes configChangePointCutType = ConfigChangePointCutTypes.REMOVE_BATCH_HTTP; final List<ConfigChangePluginService> pluginServices = getPluginServices( configChangePointCutType); // didn't enabled or add relative plugin if (pluginServices.isEmpty()) { return pjp.proceed(); } ConfigChangeRequest configChangeRequest = new ConfigChangeRequest(configChangePointCutType); configChangeRequest.setArg("dataId", ids.toString()); configChangeRequest.setArg("srcIp", RequestUtil.getRemoteIp(request)); configChangeRequest.setArg("requestIpApp", RequestUtil.getAppName(request)); configChangeRequest.setArg("use", RequestUtil.getSrcUserName(request)); return configChangeServiceHandle(pjp, pluginServices, configChangeRequest); }
@Test void testRemoveConfigByIdsAround() throws Throwable { Mockito.when(configChangePluginService.executeType()).thenReturn(ConfigChangeExecuteTypes.EXECUTE_AFTER_TYPE); ProceedingJoinPoint proceedingJoinPoint = Mockito.mock(ProceedingJoinPoint.class); HttpServletRequest request = Mockito.mock(HttpServletRequest.class); Mockito.when(proceedingJoinPoint.proceed(any())).thenReturn("mock success return"); Object o = configChangeAspect.removeConfigByIdsAround(proceedingJoinPoint, request, Arrays.asList(1L, 2L)); Thread.sleep(20L); // expect service executed. Mockito.verify(configChangePluginService, Mockito.times(1)) .execute(any(ConfigChangeRequest.class), any(ConfigChangeResponse.class)); //expect join point processed success. assertEquals("mock success return", o); }
@Override public Table getTable(String dbName, String tblName) { if (metastore.isPresent()) { return metastore.get().getTable(dbName, tblName); } String fullTableName = getKuduFullTableName(dbName, tblName); try { if (!kuduClient.tableExists(fullTableName)) { LOG.error("Kudu table {}.{} does not exist.", dbName, tblName); return null; } return tables.computeIfAbsent(fullTableName, f -> { org.apache.kudu.client.KuduTable kuduNativeTable; try { kuduNativeTable = kuduClient.openTable(fullTableName); } catch (KuduException e) { throw new StarRocksConnectorException("Failed to open table %s.", fullTableName, e); } List<ColumnSchema> columnSchemas = kuduNativeTable.getSchema().getColumns(); ArrayList<Column> columns = new ArrayList<>(columnSchemas.size()); Set<Integer> parColumnIds = Sets.newHashSet(kuduNativeTable.getPartitionSchema().getRangeSchema().getColumnIds()); List<String> partColNames = Lists.newArrayList(); for (int i = 0; i < columnSchemas.size(); i++) { ColumnSchema columnSchema = columnSchemas.get(i); String fieldName = columnSchema.getName(); boolean isKey = columnSchema.isKey(); String comment = columnSchema.getComment(); Type fieldType = ColumnTypeConverter.fromKuduType(columnSchema); Column column = new Column(fieldName, fieldType, isKey, null, true, null, comment); columns.add(column); if (parColumnIds.contains(i)) { partColNames.add(fieldName); } } return new KuduTable(masterAddresses, this.catalogName, dbName, tblName, fullTableName, columns, partColNames); }); } catch (KuduException e) { throw new StarRocksConnectorException("Failed to get table %s.", fullTableName, e); } }
@Test public void testGetTable(@Mocked org.apache.kudu.client.KuduTable mockedTable) throws KuduException { KuduMetadata metadata = new KuduMetadata(KUDU_CATALOG, new HdfsEnvironment(), KUDU_MASTER, true, SCHEMA_EMULATION_PREFIX, Optional.empty()); new Expectations() { { client.tableExists(anyString); result = true; client.openTable(anyString); result = mockedTable; mockedTable.getSchema(); result = SCHEMA; mockedTable.getPartitionSchema(); result = EMPTY_PARTITION_SCHEMA; } }; Table table = metadata.getTable("db1", "tbl1"); KuduTable kuduTable = (KuduTable) table; Assert.assertEquals("test_kudu_catalog", kuduTable.getCatalogName()); Assert.assertEquals("db1", kuduTable.getDbName()); Assert.assertEquals("tbl1", kuduTable.getTableName()); Assert.assertEquals(2, kuduTable.getColumns().size()); Assert.assertEquals(0, kuduTable.getPartitionColumnNames().size()); Assert.assertEquals(ScalarType.INT, kuduTable.getColumns().get(0).getType()); Assert.assertTrue(kuduTable.getBaseSchema().get(0).isAllowNull()); Assert.assertEquals(ScalarType.createVarcharType(CATALOG_MAX_VARCHAR_LENGTH), kuduTable.getBaseSchema().get(1).getType()); Assert.assertTrue(kuduTable.getBaseSchema().get(1).isAllowNull()); }
@Override public DataType getType() { return DataType.META_DATA; }
@Test public void testGetType() { DataType expected = DataType.META_DATA; DataType actual = executorSubscriber.getType(); assertEquals(expected, actual); }
public boolean isValid(String value) { if (value == null) { return false; } URI uri; // ensure value is a valid URI try { uri = new URI(value); } catch (URISyntaxException e) { return false; } // OK, perfom additional validation String scheme = uri.getScheme(); if (!isValidScheme(scheme)) { return false; } String authority = uri.getRawAuthority(); if ("file".equals(scheme) && (authority == null || "".equals(authority))) { // Special case - file: allows an empty authority return true; // this is a local file - nothing more to do here } else if ("file".equals(scheme) && authority != null && authority.contains(":")) { return false; } else { // Validate the authority if (!isValidAuthority(authority)) { return false; } } if (!isValidPath(uri.getRawPath())) { return false; } if (!isValidQuery(uri.getRawQuery())) { return false; } if (!isValidFragment(uri.getRawFragment())) { return false; } return true; }
@Test public void testValidator290() { UrlValidator validator = new UrlValidator(); assertTrue(validator.isValid("http://xn--h1acbxfam.idn.icann.org/")); // assertTrue(validator.isValid("http://xn--e1afmkfd.xn--80akhbyknj4f")); // Internationalized country code top-level domains assertTrue(validator.isValid("http://test.xn--lgbbat1ad8j")); //Algeria assertTrue(validator.isValid("http://test.xn--fiqs8s")); // China assertTrue(validator.isValid("http://test.xn--fiqz9s")); // China assertTrue(validator.isValid("http://test.xn--wgbh1c")); // Egypt assertTrue(validator.isValid("http://test.xn--j6w193g")); // Hong Kong assertTrue(validator.isValid("http://test.xn--h2brj9c")); // India assertTrue(validator.isValid("http://test.xn--mgbbh1a71e")); // India assertTrue(validator.isValid("http://test.xn--fpcrj9c3d")); // India assertTrue(validator.isValid("http://test.xn--gecrj9c")); // India assertTrue(validator.isValid("http://test.xn--s9brj9c")); // India assertTrue(validator.isValid("http://test.xn--xkc2dl3a5ee0h")); // India assertTrue(validator.isValid("http://test.xn--45brj9c")); // India assertTrue(validator.isValid("http://test.xn--mgba3a4f16a")); // Iran assertTrue(validator.isValid("http://test.xn--mgbayh7gpa")); // Jordan assertTrue(validator.isValid("http://test.xn--mgbc0a9azcg")); // Morocco assertTrue(validator.isValid("http://test.xn--ygbi2ammx")); // Palestinian Territory assertTrue(validator.isValid("http://test.xn--wgbl6a")); // Qatar assertTrue(validator.isValid("http://test.xn--p1ai")); // Russia assertTrue(validator.isValid("http://test.xn--mgberp4a5d4ar")); // Saudi Arabia assertTrue(validator.isValid("http://test.xn--90a3ac")); // Serbia assertTrue(validator.isValid("http://test.xn--yfro4i67o")); // Singapore assertTrue(validator.isValid("http://test.xn--clchc0ea0b2g2a9gcd")); // Singapore assertTrue(validator.isValid("http://test.xn--3e0b707e")); // South Korea assertTrue(validator.isValid("http://test.xn--fzc2c9e2c")); // Sri Lanka assertTrue(validator.isValid("http://test.xn--xkc2al3hye2a")); // Sri Lanka assertTrue(validator.isValid("http://test.xn--ogbpf8fl")); // Syria assertTrue(validator.isValid("http://test.xn--kprw13d")); // Taiwan assertTrue(validator.isValid("http://test.xn--kpry57d")); // Taiwan assertTrue(validator.isValid("http://test.xn--o3cw4h")); // Thailand assertTrue(validator.isValid("http://test.xn--pgbs0dh")); // Tunisia assertTrue(validator.isValid("http://test.xn--mgbaam7a8h")); // United Arab Emirates // Proposed internationalized ccTLDs // assertTrue(validator.isValid("http://test.xn--54b7fta0cc")); // Bangladesh // assertTrue(validator.isValid("http://test.xn--90ae")); // Bulgaria // assertTrue(validator.isValid("http://test.xn--node")); // Georgia // assertTrue(validator.isValid("http://test.xn--4dbrk0ce")); // Israel // assertTrue(validator.isValid("http://test.xn--mgb9awbf")); // Oman // assertTrue(validator.isValid("http://test.xn--j1amh")); // Ukraine // assertTrue(validator.isValid("http://test.xn--mgb2ddes")); // Yemen // Test TLDs // assertTrue(validator.isValid("http://test.xn--kgbechtv")); // Arabic // assertTrue(validator.isValid("http://test.xn--hgbk6aj7f53bba")); // Persian // assertTrue(validator.isValid("http://test.xn--0zwm56d")); // Chinese // assertTrue(validator.isValid("http://test.xn--g6w251d")); // Chinese // assertTrue(validator.isValid("http://test.xn--80akhbyknj4f")); // Russian // assertTrue(validator.isValid("http://test.xn--11b5bs3a9aj6g")); // Hindi // assertTrue(validator.isValid("http://test.xn--jxalpdlp")); // Greek // assertTrue(validator.isValid("http://test.xn--9t4b11yi5a")); // Korean // assertTrue(validator.isValid("http://test.xn--deba0ad")); // Yiddish // assertTrue(validator.isValid("http://test.xn--zckzah")); // Japanese // assertTrue(validator.isValid("http://test.xn--hlcj6aya9esc7a")); // Tamil }
public static void smooth(PointList pointList, double maxElevationDelta) { internSmooth(pointList, 0, pointList.size() - 1, maxElevationDelta); }
@Test public void smoothRamer2() { PointList pl2 = new PointList(3, true); pl2.add(0.001, 0.001, 50); pl2.add(0.0015, 0.0015, 160); pl2.add(0.0016, 0.0015, 150); pl2.add(0.0017, 0.0015, 220); pl2.add(0.002, 0.002, 20); EdgeElevationSmoothingRamer.smooth(pl2, 100); assertEquals(5, pl2.size()); assertEquals(190, pl2.getEle(1), 1); // modify as too small in interval [0,4] assertEquals(210, pl2.getEle(2), 1); // modify as too small in interval [0,4] assertEquals(220, pl2.getEle(3), .1); // keep as it is bigger than maxElevationDelta in interval [0,4] }
public static PDImageXObject createFromFile(PDDocument document, File file) throws IOException { return createFromFile(document, file, 0); }
@Test void testCreateFromFileNumberLock() throws IOException { // copy the source file to a temp directory, as we will be deleting it String tiffG3Path = "src/test/resources/org/apache/pdfbox/pdmodel/graphics/image/ccittg3.tif"; File copiedTiffFile = new File(TESTRESULTSDIR, "ccittg3n.tif"); Files.copy(new File(tiffG3Path).toPath(), copiedTiffFile.toPath(), StandardCopyOption.REPLACE_EXISTING); PDDocument document = new PDDocument(); CCITTFactory.createFromFile(document, copiedTiffFile, 0); assertTrue(copiedTiffFile.delete()); }
@Override synchronized public void close() { if (stream != null) { IOUtils.cleanupWithLogger(LOG, stream); stream = null; } }
@Test(timeout=120000) public void testRandomBytes() throws Exception { OsSecureRandom random = getOsSecureRandom(); // len = 16 checkRandomBytes(random, 16); // len = 32 checkRandomBytes(random, 32); // len = 128 checkRandomBytes(random, 128); // len = 256 checkRandomBytes(random, 256); random.close(); }
public static RunResponse from(WorkflowInstance instance, int state) { return RunResponse.builder() .workflowId(instance.getWorkflowId()) .workflowVersionId(instance.getWorkflowVersionId()) .workflowInstanceId(instance.getWorkflowInstanceId()) .workflowRunId(instance.getWorkflowRunId()) .workflowUuid(instance.getWorkflowUuid()) .status(Status.fromCode(state)) .timelineEvent(instance.getInitiator().getTimelineEvent()) .build(); }
@Test public void testBuildFromStepId() { RunResponse res = RunResponse.from(instance, "foo"); Assert.assertEquals(RunResponse.Status.DELEGATED, res.getStatus()); res = RunResponse.from(instance, null); Assert.assertEquals(RunResponse.Status.NON_TERMINAL_ERROR, res.getStatus()); }
@Override @CheckForNull public EmailMessage format(Notification notification) { if (!BuiltInQPChangeNotification.TYPE.equals(notification.getType())) { return null; } BuiltInQPChangeNotificationBuilder profilesNotification = parse(notification); StringBuilder message = new StringBuilder("The following built-in profiles have been updated:\n\n"); profilesNotification.getProfiles().stream() .sorted(Comparator.comparing(Profile::getLanguageName).thenComparing(Profile::getProfileName)) .forEach(profile -> { message.append("\"") .append(profile.getProfileName()) .append("\" - ") .append(profile.getLanguageName()) .append(": ") .append(server.getPublicRootUrl()).append("/profiles/changelog?language=") .append(profile.getLanguageKey()) .append("&name=") .append(encode(profile.getProfileName())) .append("&since=") .append(formatDate(new Date(profile.getStartDate()))) .append("&to=") .append(formatDate(new Date(profile.getEndDate()))) .append("\n"); int newRules = profile.getNewRules(); if (newRules > 0) { message.append(" ").append(newRules).append(" new rule") .append(plural(newRules)) .append('\n'); } int updatedRules = profile.getUpdatedRules(); if (updatedRules > 0) { message.append(" ").append(updatedRules).append(" rule") .append(updatedRules > 1 ? "s have been updated" : " has been updated") .append("\n"); } int removedRules = profile.getRemovedRules(); if (removedRules > 0) { message.append(" ").append(removedRules).append(" rule") .append(plural(removedRules)) .append(" removed\n"); } message.append("\n"); }); message.append("This is a good time to review your quality profiles and update them to benefit from the latest evolutions: "); message.append(server.getPublicRootUrl()).append("/profiles"); // And finally return the email that will be sent return new EmailMessage() .setMessageId(BuiltInQPChangeNotification.TYPE) .setSubject("Built-in quality profiles have been updated") .setPlainTextMessage(message.toString()); }
@Test public void notification_contains_count_of_removed_rules() { String profileName = newProfileName(); String languageKey = newLanguageKey(); String languageName = newLanguageName(); BuiltInQPChangeNotificationBuilder notification = new BuiltInQPChangeNotificationBuilder() .addProfile(Profile.newBuilder() .setProfileName(profileName) .setLanguageKey(languageKey) .setLanguageName(languageName) .setRemovedRules(2) .build()); EmailMessage emailMessage = underTest.format(notification.build()); assertMessage(emailMessage, "\n 2 rules removed\n"); }
@Converter(fallback = true) public static <T> T convertTo(Class<T> type, Exchange exchange, Object value, TypeConverterRegistry registry) { if (NodeInfo.class.isAssignableFrom(value.getClass())) { // use a fallback type converter so we can convert the embedded body if the value is NodeInfo NodeInfo ni = (NodeInfo) value; // first try to find a Converter for Node TypeConverter tc = registry.lookup(type, Node.class); if (tc != null) { Node node = NodeOverNodeInfo.wrap(ni); return tc.convertTo(type, exchange, node); } // if this does not exist we can also try NodeList (there are some type converters for that) as // the default Xerces Node implementation also implements NodeList. tc = registry.lookup(type, NodeList.class); if (tc != null) { List<NodeInfo> nil = new LinkedList<>(); nil.add(ni); return tc.convertTo(type, exchange, toDOMNodeList(nil)); } } else if (List.class.isAssignableFrom(value.getClass())) { TypeConverter tc = registry.lookup(type, NodeList.class); if (tc != null) { List<NodeInfo> lion = new LinkedList<>(); for (Object o : (List<?>) value) { if (o instanceof NodeInfo) { lion.add((NodeInfo) o); } } if (!lion.isEmpty()) { NodeList nl = toDOMNodeList(lion); return tc.convertTo(type, exchange, nl); } } } else if (NodeOverNodeInfo.class.isAssignableFrom(value.getClass())) { // NodeOverNode info is a read-only Node implementation from Saxon. In contrast to the JDK // com.sun.org.apache.xerces.internal.dom.NodeImpl class it does not implement NodeList, but // many Camel type converters are based on that interface. Therefore we convert to NodeList and // try type conversion in the fallback type converter. TypeConverter tc = registry.lookup(type, NodeList.class); if (tc != null) { List<Node> domNodeList = new LinkedList<>(); domNodeList.add((NodeOverNodeInfo) value); return tc.convertTo(type, exchange, new DOMNodeList(domNodeList)); } } return null; }
@Test public void convertToDOMSource() { DOMSource source = context.getTypeConverter().convertTo(DOMSource.class, exchange, doc); assertNotNull(source); String string = context.getTypeConverter().convertTo(String.class, exchange, source); assertEquals(CONTENT, string); }
static < RequestT, ResponseT, CallerSetupTeardownT extends Caller<RequestT, ResponseT> & SetupTeardown> Call<RequestT, ResponseT> ofCallerAndSetupTeardown( CallerSetupTeardownT implementsCallerAndSetupTeardown, Coder<ResponseT> responseTCoder) { implementsCallerAndSetupTeardown = SerializableUtils.ensureSerializable(implementsCallerAndSetupTeardown); return new Call<>( Configuration.<RequestT, ResponseT>builder() .setCaller(implementsCallerAndSetupTeardown) .setResponseCoder(responseTCoder) .setSetupTeardown(implementsCallerAndSetupTeardown) .build()); }
@Test public void givenSetupTeardownNotSerializable_throwsError() { assertThrows( IllegalArgumentException.class, () -> Call.ofCallerAndSetupTeardown( new UnSerializableCallerWithSetupTeardown(), NON_DETERMINISTIC_RESPONSE_CODER)); }
@Override public String getServletInfo() { return SERVLET_NAME; }
@Test public void getServletInfo_shouldReturnOwnDefinedServletName() { ApiV2Servlet underTest = new ApiV2Servlet(); assertThat(underTest.getServletInfo()) .isEqualTo(ApiV2Servlet.SERVLET_NAME); }
@Override public void createNode(OpenstackNode osNode) { checkNotNull(osNode, ERR_NULL_NODE); OpenstackNode updatedNode; if (osNode.intgBridge() == null && osNode.type() != CONTROLLER) { String deviceIdStr = genDpid(deviceIdCounter.incrementAndGet()); checkNotNull(deviceIdStr, ERR_NULL_DEVICE_ID); updatedNode = osNode.updateIntbridge(DeviceId.deviceId(deviceIdStr)); checkArgument(!hasIntgBridge(updatedNode.intgBridge(), updatedNode.hostname()), NOT_DUPLICATED_MSG, updatedNode.intgBridge()); } else { updatedNode = osNode; checkArgument(!hasIntgBridge(updatedNode.intgBridge(), updatedNode.hostname()), NOT_DUPLICATED_MSG, updatedNode.intgBridge()); } osNodeStore.createNode(updatedNode); log.info(String.format(MSG_NODE, osNode.hostname(), MSG_CREATED)); }
@Test(expected = IllegalArgumentException.class) public void testCreateDuplicateNode() { target.createNode(COMPUTE_1); target.createNode(COMPUTE_1); }
public static Builder forPage(int page) { return new Builder(page); }
@Test void forPage_fails_with_IAE_if_page_is_0() { assertThatThrownBy(() -> forPage(0)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("page index must be >= 1"); }
public static int scanForGap( final UnsafeBuffer termBuffer, final int termId, final int termOffset, final int limitOffset, final GapHandler handler) { int offset = termOffset; do { final int frameLength = frameLengthVolatile(termBuffer, offset); if (frameLength <= 0) { break; } offset += align(frameLength, FRAME_ALIGNMENT); } while (offset < limitOffset); final int gapBeginOffset = offset; if (offset < limitOffset) { final int limit = limitOffset - ALIGNED_HEADER_LENGTH; while (offset < limit) { offset += FRAME_ALIGNMENT; if (0 != termBuffer.getIntVolatile(offset)) { offset -= ALIGNED_HEADER_LENGTH; break; } } final int gapLength = (offset - gapBeginOffset) + ALIGNED_HEADER_LENGTH; handler.onGap(termId, gapBeginOffset, gapLength); } return gapBeginOffset; }
@Test void shouldReportNoGapWhenHwmIsInPadding() { final int paddingLength = align(HEADER_LENGTH, FRAME_ALIGNMENT) * 2; final int tail = LOG_BUFFER_CAPACITY - paddingLength; final int highWaterMark = LOG_BUFFER_CAPACITY - paddingLength + HEADER_LENGTH; when(termBuffer.getIntVolatile(tail)).thenReturn(paddingLength); when(termBuffer.getIntVolatile(tail + HEADER_LENGTH)).thenReturn(0); assertEquals( LOG_BUFFER_CAPACITY, TermGapScanner.scanForGap(termBuffer, TERM_ID, tail, highWaterMark, gapHandler)); verifyNoInteractions(gapHandler); }
@Override public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { ObjectUtil.checkNotNull(command, "command"); ObjectUtil.checkNotNull(unit, "unit"); if (initialDelay < 0) { throw new IllegalArgumentException( String.format("initialDelay: %d (expected: >= 0)", initialDelay)); } if (period <= 0) { throw new IllegalArgumentException( String.format("period: %d (expected: > 0)", period)); } validateScheduled0(initialDelay, unit); validateScheduled0(period, unit); return schedule(new ScheduledFutureTask<Void>( this, command, deadlineNanos(getCurrentTimeNanos(), unit.toNanos(initialDelay)), unit.toNanos(period))); }
@Test public void testScheduleAtFixedRateRunnableNegative() { final TestScheduledEventExecutor executor = new TestScheduledEventExecutor(); assertThrows(IllegalArgumentException.class, new Executable() { @Override public void execute() { executor.scheduleAtFixedRate(TEST_RUNNABLE, 0, -1, TimeUnit.DAYS); } }); }
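Both sides of the argument contract can be seen with any scheduled executor built on this base class. A minimal sketch assuming Netty's GlobalEventExecutor, which extends AbstractScheduledEventExecutor:

import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class FixedRateSketch {
    public static void main(String[] args) {
        // Valid arguments: zero initial delay, strictly positive period.
        ScheduledFuture<?> tick = GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(
                () -> System.out.println("tick"), 0, 1, TimeUnit.SECONDS);
        tick.cancel(false);

        try {
            // period <= 0 is rejected before anything is queued, as the test asserts.
            GlobalEventExecutor.INSTANCE.scheduleAtFixedRate(() -> { }, 0, -1, TimeUnit.DAYS);
        } catch (IllegalArgumentException expected) {
            System.out.println(expected.getMessage()); // "period: -1 (expected: > 0)"
        }
    }
}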
@Override public Map<ExecutionAttemptID, ExecutionSlotAssignment> allocateSlotsFor( List<ExecutionAttemptID> executionAttemptIds) { final Map<ExecutionVertexID, ExecutionAttemptID> vertexIdToExecutionId = new HashMap<>(); executionAttemptIds.forEach( executionId -> vertexIdToExecutionId.put(executionId.getExecutionVertexId(), executionId)); checkState( vertexIdToExecutionId.size() == executionAttemptIds.size(), "SlotSharingExecutionSlotAllocator does not support one execution vertex to have multiple concurrent executions"); final List<ExecutionVertexID> vertexIds = executionAttemptIds.stream() .map(ExecutionAttemptID::getExecutionVertexId) .collect(Collectors.toList()); return allocateSlotsForVertices(vertexIds).stream() .collect( Collectors.toMap( vertexAssignment -> vertexIdToExecutionId.get( vertexAssignment.getExecutionVertexId()), vertexAssignment -> new ExecutionSlotAssignment( vertexIdToExecutionId.get( vertexAssignment.getExecutionVertexId()), vertexAssignment.getLogicalSlotFuture()))); }
@Test void testSchedulePendingRequestBulkTimeoutCheck() { TestingPhysicalSlotRequestBulkChecker bulkChecker = new TestingPhysicalSlotRequestBulkChecker(); AllocationContext context = createBulkCheckerContextWithEv12GroupAndEv3Group(bulkChecker); context.allocateSlotsFor(EV1, EV3); PhysicalSlotRequestBulk bulk = bulkChecker.getBulk(); assertThat(bulk.getPendingRequests()).hasSize(2); assertThat(bulk.getPendingRequests()) .containsExactlyInAnyOrder(RESOURCE_PROFILE.multiply(2), RESOURCE_PROFILE); assertThat(bulk.getAllocationIdsOfFulfilledRequests()).isEmpty(); assertThat(bulkChecker.getTimeout()).isEqualTo(ALLOCATION_TIMEOUT); }
@SuppressWarnings("MethodLength") static void dissectControlRequest( final ArchiveEventCode eventCode, final MutableDirectBuffer buffer, final int offset, final StringBuilder builder) { int encodedLength = dissectLogHeader(CONTEXT, eventCode, buffer, offset, builder); HEADER_DECODER.wrap(buffer, offset + encodedLength); encodedLength += MessageHeaderDecoder.ENCODED_LENGTH; switch (eventCode) { case CMD_IN_CONNECT: CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendConnect(builder); break; case CMD_IN_CLOSE_SESSION: CLOSE_SESSION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendCloseSession(builder); break; case CMD_IN_START_RECORDING: START_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording(builder); break; case CMD_IN_STOP_RECORDING: STOP_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecording(builder); break; case CMD_IN_REPLAY: REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplay(builder); break; case CMD_IN_STOP_REPLAY: STOP_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplay(builder); break; case CMD_IN_LIST_RECORDINGS: LIST_RECORDINGS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordings(builder); break; case CMD_IN_LIST_RECORDINGS_FOR_URI: LIST_RECORDINGS_FOR_URI_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingsForUri(builder); break; case CMD_IN_LIST_RECORDING: LIST_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecording(builder); break; case CMD_IN_EXTEND_RECORDING: EXTEND_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording(builder); break; case CMD_IN_RECORDING_POSITION: RECORDING_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendRecordingPosition(builder); break; case CMD_IN_TRUNCATE_RECORDING: TRUNCATE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTruncateRecording(builder); break; case CMD_IN_STOP_RECORDING_SUBSCRIPTION: STOP_RECORDING_SUBSCRIPTION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingSubscription(builder); break; case CMD_IN_STOP_POSITION: STOP_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopPosition(builder); break; case CMD_IN_FIND_LAST_MATCHING_RECORD: FIND_LAST_MATCHING_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingSubscriptions(builder); break; case CMD_IN_LIST_RECORDING_SUBSCRIPTIONS: LIST_RECORDING_SUBSCRIPTIONS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendListRecordingSubscriptions(builder); break; case CMD_IN_START_BOUNDED_REPLAY: BOUNDED_REPLAY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartBoundedReplay(builder); break; case CMD_IN_STOP_ALL_REPLAYS: STOP_ALL_REPLAYS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopAllReplays(builder); break; case CMD_IN_REPLICATE: REPLICATE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate(builder); break; case CMD_IN_STOP_REPLICATION: STOP_REPLICATION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopReplication(builder); break; case CMD_IN_START_POSITION: START_POSITION_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartPosition(builder); break; case CMD_IN_DETACH_SEGMENTS: DETACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDetachSegments(builder); break; case CMD_IN_DELETE_DETACHED_SEGMENTS: DELETE_DETACHED_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendDeleteDetachedSegments(builder); break; case CMD_IN_PURGE_SEGMENTS: PURGE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeSegments(builder); break; case CMD_IN_ATTACH_SEGMENTS: ATTACH_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAttachSegments(builder); break; case CMD_IN_MIGRATE_SEGMENTS: MIGRATE_SEGMENTS_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendMigrateSegments(builder); break; case CMD_IN_AUTH_CONNECT: AUTH_CONNECT_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendAuthConnect(builder); break; case CMD_IN_KEEP_ALIVE: KEEP_ALIVE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendKeepAlive(builder); break; case CMD_IN_TAGGED_REPLICATE: TAGGED_REPLICATE_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendTaggedReplicate(builder); break; case CMD_IN_START_RECORDING2: START_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStartRecording2(builder); break; case CMD_IN_EXTEND_RECORDING2: EXTEND_RECORDING_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendExtendRecording2(builder); break; case CMD_IN_STOP_RECORDING_BY_IDENTITY: STOP_RECORDING_BY_IDENTITY_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendStopRecordingByIdentity(builder); break; case CMD_IN_PURGE_RECORDING: PURGE_RECORDING_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendPurgeRecording(builder); break; case CMD_IN_REPLICATE2: REPLICATE_REQUEST2_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplicate2(builder); break; case CMD_IN_REQUEST_REPLAY_TOKEN: REPLAY_TOKEN_REQUEST_DECODER.wrap( buffer, offset + encodedLength, HEADER_DECODER.blockLength(), HEADER_DECODER.version()); appendReplayToken(builder); break; default: builder.append(": unknown command"); } }
@Test void controlRequestStopPosition() { internalEncodeLogHeader(buffer, 0, 12, 32, () -> 10_000_000_000L); final StopPositionRequestEncoder requestEncoder = new StopPositionRequestEncoder(); requestEncoder.wrapAndApplyHeader(buffer, LOG_HEADER_LENGTH, headerEncoder) .controlSessionId(22) .correlationId(33) .recordingId(44); dissectControlRequest(CMD_IN_STOP_POSITION, buffer, 0, builder); assertEquals("[10.000000000] " + CONTEXT + ": " + CMD_IN_STOP_POSITION.name() + " [12/32]:" + " controlSessionId=22" + " correlationId=33" + " recordingId=44", builder.toString()); }
public <T extends Tuple> DataSource<T> tupleType(Class<T> targetType) { Preconditions.checkNotNull(targetType, "The target type class must not be null."); if (!Tuple.class.isAssignableFrom(targetType)) { throw new IllegalArgumentException( "The target type must be a subclass of " + Tuple.class.getName()); } @SuppressWarnings("unchecked") TupleTypeInfo<T> typeInfo = (TupleTypeInfo<T>) TypeExtractor.createTypeInfo(targetType); CsvInputFormat<T> inputFormat = new TupleCsvInputFormat<T>( path, this.lineDelimiter, this.fieldDelimiter, typeInfo, this.includedMask); Class<?>[] classes = new Class<?>[typeInfo.getArity()]; for (int i = 0; i < typeInfo.getArity(); i++) { classes[i] = typeInfo.getTypeAt(i).getTypeClass(); } configureInputFormat(inputFormat); return new DataSource<T>( executionContext, inputFormat, typeInfo, Utils.getCallLocationName()); }
@Test
void testFieldTypes() {
    CsvReader reader = getCsvReader();
    DataSource<Item> items = reader.tupleType(Item.class);
    TypeInformation<?> info = items.getType();
    if (!info.isTupleType()) {
        fail("The produced type information should be a tuple type.");
    } else {
        TupleTypeInfo<?> tinfo = (TupleTypeInfo<?>) info;
        assertThat(tinfo.getTypeAt(0)).isEqualTo(BasicTypeInfo.INT_TYPE_INFO);
        assertThat(tinfo.getTypeAt(1)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
        assertThat(tinfo.getTypeAt(2)).isEqualTo(BasicTypeInfo.DOUBLE_TYPE_INFO);
        assertThat(tinfo.getTypeAt(3)).isEqualTo(BasicTypeInfo.STRING_TYPE_INFO);
    }

    CsvInputFormat<?> inputFormat = (CsvInputFormat<?>) items.getInputFormat();
    assertThat(inputFormat.getFieldTypes())
            .containsExactly(Integer.class, String.class, Double.class, String.class);
}
@Override public boolean remove(String key) { repo.lock(key); try { return repo.remove(key) != null; } finally { repo.unlock(key); } }
@Test
public void testRemove() throws Exception {
    // ADD keys to remove
    assertTrue(repo.add(key01));
    assertTrue(repo.add(key02));
    assertEquals(2, cache.size());

    // REMOVE one key; the other entry must survive
    assertTrue(repo.remove(key01));
    assertEquals(1, cache.size());

    // removing an absent key reports false
    assertFalse(repo.remove(key01));
}
@Override public boolean equals(Object obj) { if (obj instanceof COSString) { COSString strObj = (COSString) obj; return getString().equals(strObj.getString()) && forceHexForm == strObj.forceHexForm; } return false; }
@Test
void testEquals() {
    // Check all these several times for consistency
    for (int i = 0; i < 10; i++) {
        // Reflexive
        COSString x1 = new COSString("Test");
        assertEquals(x1, x1);

        // Symmetry i.e. if x == y then y == x
        COSString y1 = new COSString("Test");
        assertEquals(x1, y1);
        assertEquals(y1, x1);
        COSString x2 = new COSString("Test", true);
        // also if x != y then y != x
        assertNotEquals(x1, x2);
        assertNotEquals(x2, x1);

        // Transitive if x == y && y == z then x == z
        COSString z1 = new COSString("Test");
        assertEquals(x1, y1);
        assertEquals(y1, z1);
        assertEquals(x1, z1);

        // Test the negative as well: if x1 == y1 && y1 != x2 then x1 != x2
        assertEquals(x1, y1);
        assertNotEquals(y1, x2);
        assertNotEquals(x1, x2);
    }
}
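Since equals is overridden here, the Object contract also requires equal instances to report equal hash codes. A minimal companion check, assuming COSString overrides hashCode consistently (the override is not shown above):

@Test
void testHashCodeContract() {
    COSString a = new COSString("Test");
    COSString b = new COSString("Test");
    // equal objects must agree on hashCode (Object contract)
    assertEquals(a, b);
    assertEquals(a.hashCode(), b.hashCode());
}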
public static boolean match(String pattern, String string) { assertSingleByte(string); return match(pattern.getBytes(StandardCharsets.US_ASCII), string.getBytes(StandardCharsets.US_ASCII)); }
@Test public void testValidInputBytes() { assertFalse(GlobMatcher.match(new byte[] { 'a', '*', 'b'}, new byte[] { 'a', 'a', 'a' })); assertTrue(GlobMatcher.match(new byte[] { 'a', '*', 'b'}, new byte[] { 'a', 'a', 'a', 'b' })); assertTrue(GlobMatcher.match(new byte[] { 'a', '*', '[', '0', '-', '9', ']', 'b'}, new byte[] { 'a', 'H', '5', 'b' })); }
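The String overload above simply ASCII-encodes both arguments before delegating, so the same cases can be exercised through it. A sketch, assuming assertSingleByte accepts plain ASCII input:

@Test
public void testValidInputStrings() {
    // mirrors the byte-level cases through the String overload
    assertFalse(GlobMatcher.match("a*b", "aaa"));
    assertTrue(GlobMatcher.match("a*b", "aaab"));
    assertTrue(GlobMatcher.match("a*[0-9]b", "aH5b"));
}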
@OnlyForTest Future<Message> getHeartbeatInFly() { return this.heartbeatInFly; }
@Test public void testSetErrorTimeout() throws Exception { final Replicator r = getReplicator(); this.id.unlock(); assertNull(r.getHeartbeatInFly()); final RpcRequests.AppendEntriesRequest request = createEmptyEntriesRequest(true); Mockito.when( this.rpcService.appendEntries(eq(this.peerId.getEndpoint()), eq(request), eq(this.opts.getElectionTimeoutMs() / 2), Mockito.any())).thenReturn(new FutureImpl<>()); this.id.setError(RaftError.ETIMEDOUT.getNumber()); Thread.sleep(this.opts.getElectionTimeoutMs() + 1000); assertNotNull(r.getHeartbeatInFly()); }
@VisibleForTesting List<KeyValue<String, Object>> buildTemplateParams(SmsTemplateDO template, Map<String, Object> templateParams) { return template.getParams().stream().map(key -> { Object value = templateParams.get(key); if (value == null) { throw exception(SMS_SEND_MOBILE_TEMPLATE_PARAM_MISS, key); } return new KeyValue<>(key, value); }).collect(Collectors.toList()); }
@Test
public void testBuildTemplateParams_paramMiss() {
    // prepare arguments
    SmsTemplateDO template = randomPojo(SmsTemplateDO.class,
            o -> o.setParams(Lists.newArrayList("code")));
    Map<String, Object> templateParams = new HashMap<>();
    // mock methods

    // invoke, and assert the expected exception
    assertServiceException(() -> smsSendService.buildTemplateParams(template, templateParams),
            SMS_SEND_MOBILE_TEMPLATE_PARAM_MISS, "code");
}
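For contrast, a success-path sketch (hypothetical values; it reuses the same randomPojo helper and assumes KeyValue exposes getKey()/getValue()):

@Test
public void testBuildTemplateParams_success() {
    SmsTemplateDO template = randomPojo(SmsTemplateDO.class,
            o -> o.setParams(Lists.newArrayList("code")));
    Map<String, Object> templateParams = new HashMap<>();
    templateParams.put("code", "1234");

    List<KeyValue<String, Object>> result =
            smsSendService.buildTemplateParams(template, templateParams);

    // every declared param is present, so one KeyValue pair comes back
    assertEquals(1, result.size());
    assertEquals("code", result.get(0).getKey());
    assertEquals("1234", result.get(0).getValue());
}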
static Optional<String> globalResponseError(Optional<ClientResponse> response) {
    if (!response.isPresent()) {
        return Optional.of("Timeout");
    }
    if (response.get().authenticationException() != null) {
        return Optional.of("AuthenticationException");
    }
    if (response.get().wasTimedOut()) {
        return Optional.of("Disconnected[Timeout]");
    }
    if (response.get().wasDisconnected()) {
        return Optional.of("Disconnected");
    }
    if (response.get().versionMismatch() != null) {
        return Optional.of("UnsupportedVersionException");
    }
    if (response.get().responseBody() == null) {
        return Optional.of("EmptyResponse");
    }
    if (!(response.get().responseBody() instanceof AssignReplicasToDirsResponse)) {
        return Optional.of("ClassCastException");
    }
    AssignReplicasToDirsResponseData data =
        ((AssignReplicasToDirsResponse) response.get().responseBody()).data();
    Errors error = Errors.forCode(data.errorCode());
    if (error != Errors.NONE) {
        return Optional.of("Response-level error: " + error.name());
    }
    return Optional.empty();
}
@Test public void testNoGlobalResponseError() { assertEquals(Optional.empty(), AssignmentsManager.globalResponseError(Optional.of( new ClientResponse(null, null, "", 0, 0, false, null, null, new AssignReplicasToDirsResponse( new AssignReplicasToDirsResponseData()))))); }
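The test above pins down only the final, error-free branch; the first branch is just as cheap to cover. A minimal sketch for the absent-response case:

@Test
public void testTimeoutResponseError() {
    // an absent response maps to the "Timeout" error string
    assertEquals(Optional.of("Timeout"),
        AssignmentsManager.globalResponseError(Optional.empty()));
}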
@Override public CompletableFuture<Void> relinquishMastership(DeviceId deviceId) { checkPermission(CLUSTER_WRITE); return store.relinquishRole(localNodeId, deviceId) .thenAccept(this::post) .thenApply(v -> null); }
@Test
public void relinquishMastership() {
    // no backups - the device role should just turn to NONE
    mgr.setRole(NID_LOCAL, DEV_MASTER, MASTER);
    assertEquals("wrong role:", MASTER, mgr.getLocalRole(DEV_MASTER));
    mgr.relinquishMastership(DEV_MASTER);
    assertNull("wrong master:", mgr.getMasterFor(DEV_MASTER));
    assertEquals("wrong role:", NONE, mgr.getLocalRole(DEV_MASTER));

    // not master, nothing should happen
    mgr.setRole(NID_LOCAL, DEV_OTHER, NONE);
    mgr.relinquishMastership(DEV_OTHER);
    assertNull("wrong master:", mgr.getMasterFor(DEV_OTHER));

    // provide NID_OTHER as backup and relinquish
    mgr.setRole(NID_LOCAL, DEV_MASTER, MASTER);
    assertEquals("wrong master:", NID_LOCAL, mgr.getMasterFor(DEV_MASTER));
    mgr.setRole(NID_OTHER, DEV_MASTER, STANDBY);
    mgr.relinquishMastership(DEV_MASTER);
    assertEquals("wrong master:", NID_OTHER, mgr.getMasterFor(DEV_MASTER));
}
@Override
public boolean dropTable(TableIdentifier identifier, boolean purge) {
    if (!tableExists(identifier)) {
        return false;
    }

    EcsURI tableObjectURI = tableURI(identifier);
    if (purge) {
        // if the same instance were re-used, current() would throw an exception
        TableOperations ops = newTableOps(identifier);
        TableMetadata current = ops.current();
        if (current == null) {
            return false;
        }

        CatalogUtil.dropTableData(ops.io(), current);
    }

    client.deleteObject(tableObjectURI.bucket(), tableObjectURI.name());
    return true;
}
@Test public void testDropTable() { ecsCatalog.createTable(TableIdentifier.of("a"), SCHEMA); assertThat(ecsCatalog.dropTable(TableIdentifier.of("unknown"))) .as("Drop an unknown table return false") .isFalse(); assertThat(ecsCatalog.dropTable(TableIdentifier.of("a"), true)).as("Drop a table").isTrue(); }
public void registerDelaySuppliers(String tableName, String segmentName, String columnName, int partition, Supplier<Integer> numDocsDelaySupplier, Supplier<Long> timeMsDelaySupplier) { _lock.lock(); try { TableDelay tableDelay = _tableToPartitionToDelayMs.getOrDefault(tableName, new TableDelay(tableName)); tableDelay.registerDelaySuppliers(segmentName, columnName, partition, numDocsDelaySupplier, timeMsDelaySupplier); _tableToPartitionToDelayMs.put(tableName, tableDelay); } finally { _lock.unlock(); } }
@Test public void testEmitsMaxDelayPerPartition() { _realtimeLuceneIndexingDelayTracker.registerDelaySuppliers("table1", "segment1", "column1", 1, () -> 10, () -> 20L); _realtimeLuceneIndexingDelayTracker.registerDelaySuppliers("table1", "segment2", "column1", 1, () -> 5, () -> 15L); _realtimeLuceneIndexingDelayTracker.registerDelaySuppliers("table2", "segment1", "column1", 1, () -> 25, () -> 30L); verifyGaugeValue("table1", 1, ServerGauge.LUCENE_INDEXING_DELAY_DOCS, 10); verifyGaugeValue("table1", 1, ServerGauge.LUCENE_INDEXING_DELAY_MS, 20); verifyGaugeValue("table2", 1, ServerGauge.LUCENE_INDEXING_DELAY_DOCS, 25); verifyGaugeValue("table2", 1, ServerGauge.LUCENE_INDEXING_DELAY_MS, 30); }
public boolean overlaps(final BoundingBox pBoundingBox, double pZoom) {
    //FIXME this is a total hack but it works around a number of issues related to vertical map
    //replication and horizontal replication that can cause polygons to completely disappear when
    //panning
    if (pZoom < 3)
        return true;

    boolean latMatch = false;
    boolean lonMatch = false;

    //vertical wrapping detection
    if (pBoundingBox.mLatSouth <= mLatNorth && pBoundingBox.mLatSouth >= mLatSouth)
        latMatch = true;

    //normal case, non overlapping
    if (mLonWest >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
        lonMatch = true;
    //normal case, non overlapping
    if (mLonEast >= pBoundingBox.mLonWest && mLonWest <= pBoundingBox.mLonEast)
        lonMatch = true;

    //special case for when *this completely surrounds the pBoundingBox
    if (mLonWest <= pBoundingBox.mLonWest && mLonEast >= pBoundingBox.mLonEast
            && mLatNorth >= pBoundingBox.mLatNorth && mLatSouth <= pBoundingBox.mLatSouth)
        return true;

    //normal case, non overlapping
    if (mLatNorth >= pBoundingBox.mLatSouth && mLatNorth <= mLatSouth)
        latMatch = true;
    //normal case, non overlapping
    if (mLatSouth >= pBoundingBox.mLatSouth && mLatSouth <= mLatSouth)
        latMatch = true;

    if (mLonWest > mLonEast) {
        //the date line is included in the bounding box
        //we want to match lon from the dateline to the eastern bounds of the box
        //and the dateline to the western bounds of the box
        if (mLonEast <= pBoundingBox.mLonEast && pBoundingBox.mLonWest >= mLonWest)
            lonMatch = true;

        if (mLonWest >= pBoundingBox.mLonEast && mLonEast <= pBoundingBox.mLonEast) {
            lonMatch = true;
            if (pBoundingBox.mLonEast < mLonWest && pBoundingBox.mLonWest < mLonWest)
                lonMatch = false;
            if (pBoundingBox.mLonEast > mLonEast && pBoundingBox.mLonWest > mLonEast)
                lonMatch = false;
        }
        if (mLonWest >= pBoundingBox.mLonEast && mLonEast >= pBoundingBox.mLonEast) {
            lonMatch = true;
        }
    }

    return latMatch && lonMatch;
}
@Test
public void testNorthernBoundsSimple() {
    // the item's southern bounds move from inside the view to just out of view
    BoundingBox view = new BoundingBox(2, 2, -2, -2);
    BoundingBox item = new BoundingBox(2.1, 2, 0, -2);
    Assert.assertTrue(view.overlaps(item, 4));

    item = new BoundingBox(2.1, 2, 1.9, -2);
    Assert.assertTrue(view.overlaps(item, 4));

    item = new BoundingBox(3.1, 2, 1.999999999, -2);
    Assert.assertTrue(view.overlaps(item, 4));

    item = new BoundingBox(3.1, 2, 2.0, -2);
    Assert.assertTrue(view.overlaps(item, 4));

    item = new BoundingBox(3.1, 2, 2.1, -2);
    Assert.assertFalse(view.overlaps(item, 4));
}
@Override
public List<Set<TopicPartition>> assignPartitions(Cluster cluster, int numMetricFetchers) {
    if (numMetricFetchers != SUPPORTED_NUM_METRIC_FETCHER) {
        throw new IllegalArgumentException("DefaultMetricSamplerPartitionAssignor supports only a single metric fetcher.");
    }
    // Create an array to host the assignment of all the metric fetchers.
    List<Set<TopicPartition>> assignments = new ArrayList<>();
    assignments.add(assignPartitions(cluster));
    return assignments;
}
@Test
public void testAssignment() {
    int maxNumPartitionsForTopic = -1;
    int totalNumPartitions = 0;

    // Prepare the metadata
    Set<PartitionInfo> partitions = new HashSet<>();
    for (int i = 0; i < NUM_TOPICS; i++) {
        // Random number of partitions ranging from 4 to 400
        int randomNumPartitions = 4 * (RANDOM.nextInt(100) + 1);
        maxNumPartitionsForTopic = Math.max(randomNumPartitions, maxNumPartitionsForTopic);
        totalNumPartitions += randomNumPartitions;
        for (int j = 0; j < randomNumPartitions; j++) {
            partitions.add(new PartitionInfo(TOPIC_PREFIX + i, j, NODE_0, nodes(), nodes()));
        }
    }
    Cluster cluster = new Cluster("cluster", Arrays.asList(nodes()), partitions,
                                  Collections.emptySet(), Collections.emptySet());
    Metadata metadata = new Metadata(METADATA_REFRESH_BACKOFF, METADATA_EXPIRY_MS, new LogContext(),
                                     new ClusterResourceListeners());
    Map<String, Set<PartitionInfo>> topicToTopicPartitions = new HashMap<>();
    for (PartitionInfo tp : partitions) {
        topicToTopicPartitions.putIfAbsent(tp.topic(), new HashSet<>());
        topicToTopicPartitions.get(tp.topic()).add(tp);
    }
    List<MetadataResponse.TopicMetadata> topicMetadata = new ArrayList<>(partitions.size());
    for (Map.Entry<String, Set<PartitionInfo>> entry : topicToTopicPartitions.entrySet()) {
        List<MetadataResponse.PartitionMetadata> partitionMetadata = new ArrayList<>(entry.getValue().size());
        for (PartitionInfo tp : entry.getValue()) {
            partitionMetadata.add(new MetadataResponse.PartitionMetadata(Errors.NONE,
                new TopicPartition(tp.topic(), tp.partition()), Optional.of(NODE_0.id()),
                Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH), nodeIds(), nodeIds(),
                Collections.emptyList()));
        }
        topicMetadata.add(new MetadataResponse.TopicMetadata(Errors.NONE, entry.getKey(), false, partitionMetadata));
    }
    MetadataResponse metadataResponse = KafkaCruiseControlUtils.prepareMetadataResponse(cluster.nodes(),
        cluster.clusterResource().clusterId(), MetadataResponse.NO_CONTROLLER_ID, topicMetadata);
    metadata.update(KafkaCruiseControlUtils.REQUEST_VERSION_UPDATE, metadataResponse, false, 0);

    MetricSamplerPartitionAssignor assignor = new DefaultMetricSamplerPartitionAssignor();
    Set<TopicPartition> assignment = assignor.assignPartitions(metadata.fetch());

    int maxAssignedNumPartitionsForFetcher = -1;
    int minAssignedNumPartitionsForFetcher = Integer.MAX_VALUE;
    int totalAssignedNumPartitions = 0;
    maxAssignedNumPartitionsForFetcher = Math.max(maxAssignedNumPartitionsForFetcher, assignment.size());
    minAssignedNumPartitionsForFetcher = Math.min(minAssignedNumPartitionsForFetcher, assignment.size());
    Set<TopicPartition> uniqueAssignedPartitions = new HashSet<>(assignment);
    totalAssignedNumPartitions += assignment.size();

    // Make sure all the partitions are assigned and there is no double assignment.
    assertEquals("Total assigned number of partitions should be " + totalNumPartitions,
                 totalNumPartitions, totalAssignedNumPartitions);
    assertEquals("Total number of unique assigned partitions should be " + totalNumPartitions,
                 totalNumPartitions, uniqueAssignedPartitions.size());
    // single fetcher, so the average equals the total
    int avgAssignedPartitionsPerFetcher = totalNumPartitions;
    assertTrue("In the worst case the max number of partitions assigned to a metric fetcher should not differ by "
                   + "more than the partition number of the biggest topic, which is " + maxNumPartitionsForTopic,
               maxAssignedNumPartitionsForFetcher - avgAssignedPartitionsPerFetcher <= maxNumPartitionsForTopic);
    assertTrue("In the worst case the min number of partitions assigned to a metric fetcher should not differ by "
                   + "more than the partition number of the biggest topic, which is " + maxNumPartitionsForTopic,
               avgAssignedPartitionsPerFetcher - minAssignedNumPartitionsForFetcher <= maxNumPartitionsForTopic);
}
@NonNull @Override public EncodeStrategy getEncodeStrategy(@NonNull Options options) { Boolean encodeTransformation = options.get(ENCODE_TRANSFORMATION); return encodeTransformation != null && encodeTransformation ? EncodeStrategy.TRANSFORMED : EncodeStrategy.SOURCE; }
@Test public void testEncodeStrategy_withEncodeTransformationTrue_returnsTransformed() { assertThat(encoder.getEncodeStrategy(options)).isEqualTo(EncodeStrategy.TRANSFORMED); }
@PublicAPI(usage = ACCESS, state = EXPERIMENTAL) public static CreatorByRootClass defineByRootClasses(Predicate<? super JavaClass> rootClassPredicate) { return CreatorByRootClass.from(rootClassPredicate); }
@Test public void rejects_overlapping_modules_by_root_classes() { JavaClasses invalidExamples = new ClassFileImporter().importPackages(getExamplePackage("invalid")); assertThatThrownBy( () -> ArchModules .defineByRootClasses(javaClass -> javaClass.getSimpleName().endsWith("Descriptor")) .modularize(invalidExamples) ) .isInstanceOf(IllegalArgumentException.class) .hasMessageContaining("modules would overlap") .hasMessageContaining( com.tngtech.archunit.library.modules.testexamples.invalid.overlapping_root_classes.ModuleOneDescriptor.class.getPackage().getName()) .hasMessageContaining( com.tngtech.archunit.library.modules.testexamples.invalid.overlapping_root_classes.child.ModuleTwoDescriptor.class.getPackage().getName()); }
@Override public ValidationResult responseMessageForIsRepositoryConfigurationValid(String responseBody) { return jsonResultMessageHandler.toValidationResult(responseBody); }
@Test public void shouldBuildSuccessValidationResultFromCheckRepositoryConfigurationValidResponse() throws Exception { assertThat(messageHandler.responseMessageForIsRepositoryConfigurationValid("").isSuccessful(), is(true)); assertThat(messageHandler.responseMessageForIsRepositoryConfigurationValid(null).isSuccessful(), is(true)); }
public static ConfigDefinitionKey parseConfigName(Element configE) { if (!configE.getNodeName().equals("config")) { throw new IllegalArgumentException("The root element must be 'config', but was '" + configE.getNodeName() + "'"); } if (!configE.hasAttribute("name")) { throw new IllegalArgumentException ("The 'config' element must have a 'name' attribute that matches the name of the config definition"); } String elementString = configE.getAttribute("name"); if (!elementString.contains(".")) { throw new IllegalArgumentException("The config name '" + elementString + "' contains illegal characters. Only names with the pattern " + namespacePattern.pattern() + "." + namePattern.pattern() + " are legal."); } Tuple2<String, String> t = ConfigUtils.getNameAndNamespaceFromString(elementString); String xmlName = t.first; String xmlNamespace = t.second; if (!validName(xmlName)) { throw new IllegalArgumentException("The config name '" + xmlName + "' contains illegal characters. Only names with the pattern " + namePattern.toString() + " are legal."); } if (!validNamespace(xmlNamespace)) { throw new IllegalArgumentException("The config namespace '" + xmlNamespace + "' contains illegal characters. Only namespaces with the pattern " + namespacePattern.toString() + " are legal."); } return new ConfigDefinitionKey(xmlName, xmlNamespace); }
@Test
void testNameParsing() {
    Element configRoot = getDocument(new StringReader("<config name=\"test.function-test\" version=\"1\">" +
            "<int_val>1</int_val>" +
            "</config>"));
    ConfigDefinitionKey key = DomConfigPayloadBuilder.parseConfigName(configRoot);
    assertEquals("function-test", key.getName());
    assertEquals("test", key.getNamespace());
}
@Override
public Multimap<String, String> findBundlesForUnloading(final LoadData loadData, final ServiceConfiguration conf) {
    selectedBundlesCache.clear();
    Map<String, BrokerData> brokersData = loadData.getBrokerData();
    Map<String, BundleData> loadBundleData = loadData.getBundleDataForLoadShedding();
    Map<String, Long> recentlyUnloadedBundles = loadData.getRecentlyUnloadedBundles();

    MutableObject<String> msgRateOverloadedBroker = new MutableObject<>();
    MutableObject<String> msgThroughputOverloadedBroker = new MutableObject<>();
    MutableObject<String> msgRateUnderloadedBroker = new MutableObject<>();
    MutableObject<String> msgThroughputUnderloadedBroker = new MutableObject<>();
    MutableDouble maxMsgRate = new MutableDouble(-1);
    MutableDouble maxThroughput = new MutableDouble(-1);
    MutableDouble minMsgRate = new MutableDouble(Integer.MAX_VALUE);
    MutableDouble minThroughput = new MutableDouble(Integer.MAX_VALUE);

    brokersData.forEach((broker, data) -> {
        double msgRate = data.getLocalData().getMsgRateIn() + data.getLocalData().getMsgRateOut();
        double throughputRate = data.getLocalData().getMsgThroughputIn()
                + data.getLocalData().getMsgThroughputOut();
        if (msgRate > maxMsgRate.getValue()) {
            msgRateOverloadedBroker.setValue(broker);
            maxMsgRate.setValue(msgRate);
        }
        if (throughputRate > maxThroughput.getValue()) {
            msgThroughputOverloadedBroker.setValue(broker);
            maxThroughput.setValue(throughputRate);
        }
        if (msgRate < minMsgRate.getValue()) {
            msgRateUnderloadedBroker.setValue(broker);
            minMsgRate.setValue(msgRate);
        }
        if (throughputRate < minThroughput.getValue()) {
            msgThroughputUnderloadedBroker.setValue(broker);
            minThroughput.setValue(throughputRate);
        }
    });

    // find the difference between two brokers based on msgRate and throughput and check if the load distribution
    // discrepancy is higher than threshold. if that matches then try to unload bundle from overloaded brokers to
    // give chance of uniform load distribution.
    if (minMsgRate.getValue() <= EPS && minMsgRate.getValue() >= -EPS) {
        minMsgRate.setValue(1.0);
    }
    if (minThroughput.getValue() <= EPS && minThroughput.getValue() >= -EPS) {
        minThroughput.setValue(1.0);
    }
    double msgRateDifferencePercentage = ((maxMsgRate.getValue() - minMsgRate.getValue()) * 100)
            / (minMsgRate.getValue());
    double msgThroughputDifferenceRate = maxThroughput.getValue() / minThroughput.getValue();

    // if the threshold matches then find out how much load needs to be unloaded by considering number of msgRate
    // and throughput.
    boolean isMsgRateThresholdExceeded = conf.getLoadBalancerMsgRateDifferenceShedderThreshold() > 0
            && msgRateDifferencePercentage > conf.getLoadBalancerMsgRateDifferenceShedderThreshold();
    boolean isMsgThroughputThresholdExceeded = conf
            .getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold() > 0
            && msgThroughputDifferenceRate > conf
                    .getLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold();

    if (isMsgRateThresholdExceeded || isMsgThroughputThresholdExceeded) {
        MutableInt msgRateRequiredFromUnloadedBundles = new MutableInt(
                (int) ((maxMsgRate.getValue() - minMsgRate.getValue()) * conf.getMaxUnloadPercentage()));
        MutableInt msgThroughputRequiredFromUnloadedBundles = new MutableInt(
                (int) ((maxThroughput.getValue() - minThroughput.getValue()) * conf.getMaxUnloadPercentage()));

        if (isMsgRateThresholdExceeded) {
            if (log.isDebugEnabled()) {
                log.debug("Found bundles for uniform load balancing. "
                        + "msgRate overloaded broker: {} with msgRate: {}, "
                        + "msgRate underloaded broker: {} with msgRate: {}",
                        msgRateOverloadedBroker.getValue(), maxMsgRate.getValue(),
                        msgRateUnderloadedBroker.getValue(), minMsgRate.getValue());
            }
            LocalBrokerData overloadedBrokerData =
                    brokersData.get(msgRateOverloadedBroker.getValue()).getLocalData();
            if (overloadedBrokerData.getBundles().size() > 1
                    && (msgRateRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessage())) {
                // Sort bundles by msgRate, then pick the bundle which can help to reduce load uniformly with
                // under-loaded broker
                loadBundleData.entrySet().stream()
                        .filter(e -> overloadedBrokerData.getBundles().contains(e.getKey()))
                        .map((e) -> {
                            String bundle = e.getKey();
                            TimeAverageMessageData shortTermData = e.getValue().getShortTermData();
                            double msgRate = shortTermData.getMsgRateIn() + shortTermData.getMsgRateOut();
                            return Pair.of(bundle, msgRate);
                        }).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft()))
                        .sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> {
                            if (conf.getMaxUnloadBundleNumPerShedding() != -1
                                    && selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) {
                                return;
                            }
                            String bundle = e.getLeft();
                            double bundleMsgRate = e.getRight();
                            if (bundleMsgRate <= (msgRateRequiredFromUnloadedBundles.getValue()
                                    + 1000/* delta */)) {
                                log.info("Found bundle to unload with msgRate {}", bundleMsgRate);
                                msgRateRequiredFromUnloadedBundles.add(-bundleMsgRate);
                                selectedBundlesCache.put(msgRateOverloadedBroker.getValue(), bundle);
                            }
                        });
            }
        } else {
            if (log.isDebugEnabled()) {
                log.debug("Found bundles for uniform load balancing. "
                        + "msgThroughput overloaded broker: {} with msgThroughput {}, "
                        + "msgThroughput underloaded broker: {} with msgThroughput: {}",
                        msgThroughputOverloadedBroker.getValue(), maxThroughput.getValue(),
                        msgThroughputUnderloadedBroker.getValue(), minThroughput.getValue());
            }
            LocalBrokerData overloadedBrokerData =
                    brokersData.get(msgThroughputOverloadedBroker.getValue()).getLocalData();
            if (overloadedBrokerData.getBundles().size() > 1
                    && msgThroughputRequiredFromUnloadedBundles.getValue() >= conf.getMinUnloadMessageThroughput()) {
                // Sort bundles by throughput, then pick the bundle which can help to reduce load uniformly with
                // under-loaded broker
                loadBundleData.entrySet().stream()
                        .filter(e -> overloadedBrokerData.getBundles().contains(e.getKey()))
                        .map((e) -> {
                            String bundle = e.getKey();
                            TimeAverageMessageData shortTermData = e.getValue().getShortTermData();
                            double msgThroughput = shortTermData.getMsgThroughputIn()
                                    + shortTermData.getMsgThroughputOut();
                            return Pair.of(bundle, msgThroughput);
                        }).filter(e -> !recentlyUnloadedBundles.containsKey(e.getLeft()))
                        .sorted((e1, e2) -> Double.compare(e2.getRight(), e1.getRight())).forEach((e) -> {
                            if (conf.getMaxUnloadBundleNumPerShedding() != -1
                                    && selectedBundlesCache.size() >= conf.getMaxUnloadBundleNumPerShedding()) {
                                return;
                            }
                            String bundle = e.getLeft();
                            double msgThroughput = e.getRight();
                            if (msgThroughput <= (msgThroughputRequiredFromUnloadedBundles.getValue()
                                    + 1000/* delta */)) {
                                log.info("Found bundle to unload with msgThroughput {}", msgThroughput);
                                msgThroughputRequiredFromUnloadedBundles.add(-msgThroughput);
                                selectedBundlesCache.put(msgThroughputOverloadedBroker.getValue(), bundle);
                            }
                        });
            }
        }
    }

    return selectedBundlesCache;
}
@Test
public void testOverloadBrokerSelect() {
    conf.setMaxUnloadBundleNumPerShedding(1);
    conf.setMaxUnloadPercentage(0.5);
    int numBrokers = 5;
    int numBundles = 5;
    LoadData loadData = new LoadData();

    LocalBrokerData[] localBrokerDatas = new LocalBrokerData[]{
            new LocalBrokerData(), new LocalBrokerData(), new LocalBrokerData(),
            new LocalBrokerData(), new LocalBrokerData()};
    String[] brokerNames = new String[]{"broker0", "broker1", "broker2", "broker3", "broker4"};
    double[] brokerMsgRates = new double[]{
            50000,  // broker0
            60000,  // broker1
            70000,  // broker2
            10000,  // broker3
            20000}; // broker4
    double[] brokerMsgThroughputs = new double[]{
            50 * 1024 * 1024,  // broker0
            60 * 1024 * 1024,  // broker1
            70 * 1024 * 1024,  // broker2
            80 * 1024 * 1024,  // broker3
            10 * 1024 * 1024}; // broker4

    for (int brokerId = 0; brokerId < numBrokers; brokerId++) {
        double msgRate = brokerMsgRates[brokerId] / numBundles;
        double throughput = brokerMsgThroughputs[brokerId] / numBundles;
        for (int i = 0; i < numBundles; ++i) {
            String bundleName = "broker-" + brokerId + "-bundle-" + i;
            localBrokerDatas[brokerId].getBundles().add(bundleName);
            localBrokerDatas[brokerId].setMsgRateIn(brokerMsgRates[brokerId]);
            localBrokerDatas[brokerId].setMsgThroughputIn(brokerMsgThroughputs[brokerId]);

            BundleData bundle = new BundleData();
            TimeAverageMessageData timeAverageMessageData = new TimeAverageMessageData();
            timeAverageMessageData.setMsgRateIn(msgRate);
            timeAverageMessageData.setMsgThroughputIn(throughput);
            bundle.setShortTermData(timeAverageMessageData);
            loadData.getBundleData().put(bundleName, bundle);
        }
        loadData.getBrokerData().put(brokerNames[brokerId], new BrokerData(localBrokerDatas[brokerId]));
    }

    // disable throughput based load shedding, enable rate based load shedding only
    conf.setLoadBalancerMsgRateDifferenceShedderThreshold(50);
    conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(0);
    Multimap<String, String> bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf);
    assertEquals(bundlesToUnload.size(), 1);
    assertTrue(bundlesToUnload.containsKey("broker2"));

    // disable rate based load shedding, enable throughput based load shedding only
    conf.setLoadBalancerMsgRateDifferenceShedderThreshold(0);
    conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(2);
    bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf);
    assertEquals(bundlesToUnload.size(), 1);
    assertTrue(bundlesToUnload.containsKey("broker3"));

    // enable both rate and throughput based load shedding, but rate based load shedding has higher priority
    conf.setLoadBalancerMsgRateDifferenceShedderThreshold(50);
    conf.setLoadBalancerMsgThroughputMultiplierDifferenceShedderThreshold(2);
    bundlesToUnload = uniformLoadShedder.findBundlesForUnloading(loadData, conf);
    assertEquals(bundlesToUnload.size(), 1);
    assertTrue(bundlesToUnload.containsKey("broker2"));
}
private <T> RestResponse<T> get(final String path, final Class<T> type) { return executeRequestSync(HttpMethod.GET, path, null, r -> deserialize(r.getBody(), type), Optional.empty()); }
@Test
public void shouldPostQueryRequest_chunkHandler_closeEarly() {
    ksqlTarget = new KsqlTarget(httpClient, socketAddress, localProperties, authHeader, HOST,
        Collections.emptyMap(), RequestOptions.DEFAULT_TIMEOUT);
    executor.submit(this::expectPostQueryRequestChunkHandler);
    assertThatEventually(requestStarted::get, is(true));
    handlerCaptor.getValue().handle(Buffer.buffer("{\"row\": {\"columns\": [1.0, 12.1]}},\n"));
    closeConnection.complete(null);
    assertThatEventually(error::get, notNullValue());
    assertThat(error.get().getMessage(), containsString("Error issuing POST to KSQL server. path:/query"));
    assertThat(rows.size(), is(1));
}
@Override public void init(DatabaseMetaData metaData) throws SQLException { checkState(!initialized, "onInit() must be called once"); Version version = checkDbVersion(metaData, MIN_SUPPORTED_VERSION); supportsNullNotDistinct = version.compareTo(MIN_NULL_NOT_DISTINCT_VERSION) >= 0; initialized = true; }
@Test void init_throws_ISE_if_called_twice() throws Exception { DatabaseMetaData metaData = newMetadata(11, 0); underTest.init(metaData); assertThatThrownBy(() -> underTest.init(metaData)) .isInstanceOf(IllegalStateException.class) .hasMessage("onInit() must be called once"); }
public void execute(int[] bytecode) {
    for (var i = 0; i < bytecode.length; i++) {
        Instruction instruction = Instruction.getInstruction(bytecode[i]);
        switch (instruction) {
            case LITERAL:
                // Read the next byte from the bytecode.
                int value = bytecode[++i];
                // Push the next value to stack
                stack.push(value);
                break;
            case SET_AGILITY:
                var amount = stack.pop();
                var wizard = stack.pop();
                setAgility(wizard, amount);
                break;
            case SET_WISDOM:
                amount = stack.pop();
                wizard = stack.pop();
                setWisdom(wizard, amount);
                break;
            case SET_HEALTH:
                amount = stack.pop();
                wizard = stack.pop();
                setHealth(wizard, amount);
                break;
            case GET_HEALTH:
                wizard = stack.pop();
                stack.push(getHealth(wizard));
                break;
            case GET_AGILITY:
                wizard = stack.pop();
                stack.push(getAgility(wizard));
                break;
            case GET_WISDOM:
                wizard = stack.pop();
                stack.push(getWisdom(wizard));
                break;
            case ADD:
                var a = stack.pop();
                var b = stack.pop();
                stack.push(a + b);
                break;
            case DIVIDE:
                a = stack.pop();
                b = stack.pop();
                stack.push(b / a);
                break;
            case PLAY_SOUND:
                wizard = stack.pop();
                getWizards()[wizard].playSound();
                break;
            case SPAWN_PARTICLES:
                wizard = stack.pop();
                getWizards()[wizard].spawnParticles();
                break;
            default:
                throw new IllegalArgumentException("Invalid instruction value");
        }
        LOGGER.info("Executed " + instruction.name() + ", Stack contains " + getStack());
    }
}
@Test void testPlaySound() { var wizardNumber = 0; var bytecode = new int[3]; bytecode[0] = LITERAL.getIntValue(); bytecode[1] = wizardNumber; bytecode[2] = PLAY_SOUND.getIntValue(); var vm = new VirtualMachine(); vm.execute(bytecode); assertEquals(0, vm.getStack().size()); assertEquals(1, vm.getWizards()[0].getNumberOfPlayedSounds()); }
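SET_HEALTH pops the amount first and the wizard index second, so operands must be pushed wizard-first. A sketch of that stack discipline, assuming Wizard exposes a getHealth() accessor:

@Test
void testSetHealth() {
    var bytecode = new int[5];
    bytecode[0] = LITERAL.getIntValue();
    bytecode[1] = 0;                        // wizard index, pushed first
    bytecode[2] = LITERAL.getIntValue();
    bytecode[3] = 100;                      // amount, pushed second (popped first)
    bytecode[4] = SET_HEALTH.getIntValue();
    var vm = new VirtualMachine();
    vm.execute(bytecode);
    assertEquals(100, vm.getWizards()[0].getHealth());
}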
@Override public ObjectNode encode(RemoteMepEntry remoteMepEntry, CodecContext context) { checkNotNull(remoteMepEntry, "Mep cannot be null"); ObjectNode result = context.mapper().createObjectNode() .put("remoteMepId", remoteMepEntry.remoteMepId().toString()) .put("remoteMepState", remoteMepEntry.state().name()) .put("rdi", remoteMepEntry.rdi()); if (remoteMepEntry.failedOrOkTime() != null) { result = result.put("failedOrOkTime", remoteMepEntry.failedOrOkTime().toString()); } if (remoteMepEntry.macAddress() != null) { result = result.put("macAddress", remoteMepEntry.macAddress().toString()); } if (remoteMepEntry.portStatusTlvType() != null) { result = result.put("portStatusTlvType", remoteMepEntry.portStatusTlvType().name()); } if (remoteMepEntry.interfaceStatusTlvType() != null) { result = result.put("interfaceStatusTlvType", remoteMepEntry.interfaceStatusTlvType().name()); } if (remoteMepEntry.senderIdTlvType() != null) { result = result.put("senderIdTlvType", remoteMepEntry.senderIdTlvType().name()); } return result; }
@Test public void testEncodeRemoteMepEntryCodecContext() { ObjectNode node = mapper.createObjectNode(); node.set("remoteMep", context.codec(RemoteMepEntry.class) .encode(remoteMep1, context)); assertEquals(10, node.get("remoteMep").get("remoteMepId").asInt()); }
@Override public void register(Component component) { checkComponent(component); checkArgument(component.getType() == Component.Type.FILE, "component must be a file"); checkState(analysisMetadataHolder.isPullRequest() || !analysisMetadataHolder.isFirstAnalysis(), "No file can be registered on first branch analysis"); addedComponents.add(component); }
@Test @UseDataProvider("anyTypeButFile") public void register_fails_with_IAE_if_component_is_not_a_file(Component.Type anyTypeButFile) { Component component = newComponent(anyTypeButFile); assertThatThrownBy(() -> underTest.register(component)) .isInstanceOf(IllegalArgumentException.class) .hasMessage("component must be a file"); }
@Override public Collection<LocalDataQueryResultRow> getRows(final ShowSQLParserRuleStatement sqlStatement, final ContextManager contextManager) { SQLParserRuleConfiguration ruleConfig = rule.getConfiguration(); return Collections.singleton(new LocalDataQueryResultRow(null != ruleConfig.getParseTreeCache() ? ruleConfig.getParseTreeCache().toString() : "", null != ruleConfig.getSqlStatementCache() ? ruleConfig.getSqlStatementCache().toString() : "")); }
@Test void assertSQLParserRule() throws SQLException { engine.executeQuery(); Collection<LocalDataQueryResultRow> actual = engine.getRows(); assertThat(actual.size(), is(1)); Iterator<LocalDataQueryResultRow> iterator = actual.iterator(); LocalDataQueryResultRow row = iterator.next(); assertThat(row.getCell(1), is("initialCapacity: 128, maximumSize: 1024")); assertThat(row.getCell(2), is("initialCapacity: 2000, maximumSize: 65535")); }
public static String addCspHeadersWithNonceToResponse(HttpResponse httpResponse) {
    final String nonce = getNonce();
    List<String> cspPolicies = List.of(
        "default-src 'self'",
        "base-uri 'none'",
        "connect-src 'self' http: https:",
        "font-src 'self' data:",
        "img-src * data: blob:",
        "object-src 'none'",
        "script-src 'nonce-" + nonce + "'",
        "style-src 'self' 'unsafe-inline'",
        "worker-src 'none'");
    String policies = String.join("; ", cspPolicies).trim();
    List<String> cspHeaders = List.of("Content-Security-Policy", "X-Content-Security-Policy", "X-WebKit-CSP");
    cspHeaders.forEach(header -> httpResponse.setHeader(header, policies));
    return nonce;
}
@Test public void addCspHeadersWithNonceToResponse_whenCalled_shouldAddNonceToCspHeaders() { HttpResponse httpServletResponse = mock(HttpResponse.class); String nonce = addCspHeadersWithNonceToResponse(httpServletResponse); verify(httpServletResponse).setHeader(eq("Content-Security-Policy"), contains("script-src 'nonce-" + nonce + "'")); verify(httpServletResponse).setHeader(eq("X-Content-Security-Policy"), contains("script-src 'nonce-" + nonce + "'")); verify(httpServletResponse).setHeader(eq("X-WebKit-CSP"), contains("script-src 'nonce-" + nonce + "'")); }
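The returned nonce only has value if the caller echoes it on inline scripts; anything without a matching nonce attribute is blocked by script-src. A usage sketch (the page-rendering context is hypothetical):

// render the page only after the headers are set, reusing the same nonce
String nonce = addCspHeadersWithNonceToResponse(httpResponse);
String inlineScript = "<script nonce=\"" + nonce + "\">console.log('allowed');</script>";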
public double[][] test(DataFrame data) { DataFrame x = formula.x(data); int n = x.nrow(); int ntrees = trees.length; double[][] prediction = new double[ntrees][n]; for (int j = 0; j < n; j++) { Tuple xj = x.get(j); double base = b; for (int i = 0; i < ntrees; i++) { base += shrinkage * trees[i].predict(xj); prediction[i][j] = base; } } return prediction; }
@Test public void testAileronsLAD() { test(Loss.lad(), "ailerons", Ailerons.formula, Ailerons.data, 0.0002); }
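Row i of the matrix returned by test holds the staged prediction after i + 1 trees, so the last row equals the fully boosted prediction. A usage sketch, assuming a fitted GradientTreeBoost named model and a DataFrame named testData (both hypothetical here):

double[][] staged = model.test(testData);
double[] afterFirstTree = staged[0];                   // b + shrinkage * tree[0]
double[] finalPrediction = staged[staged.length - 1];  // all trees applied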
public static MySQLCommandPacket newInstance(final MySQLCommandPacketType commandPacketType, final MySQLPacketPayload payload, final ConnectionSession connectionSession) { switch (commandPacketType) { case COM_QUIT: return new MySQLComQuitPacket(); case COM_INIT_DB: return new MySQLComInitDbPacket(payload); case COM_FIELD_LIST: return new MySQLComFieldListPacket(payload); case COM_QUERY: return new MySQLComQueryPacket(payload); case COM_STMT_PREPARE: return new MySQLComStmtPreparePacket(payload); case COM_STMT_EXECUTE: MySQLServerPreparedStatement serverPreparedStatement = connectionSession.getServerPreparedStatementRegistry().getPreparedStatement(payload.getByteBuf().getIntLE(payload.getByteBuf().readerIndex())); return new MySQLComStmtExecutePacket(payload, serverPreparedStatement.getSqlStatementContext().getSqlStatement().getParameterCount()); case COM_STMT_SEND_LONG_DATA: return new MySQLComStmtSendLongDataPacket(payload); case COM_STMT_RESET: return new MySQLComStmtResetPacket(payload); case COM_STMT_CLOSE: return new MySQLComStmtClosePacket(payload); case COM_SET_OPTION: return new MySQLComSetOptionPacket(payload); case COM_PING: return new MySQLComPingPacket(); case COM_RESET_CONNECTION: return new MySQLComResetConnectionPacket(); default: return new MySQLUnsupportedCommandPacket(commandPacketType); } }
@Test void assertNewInstanceWithComStmtExecutePacket() { when(payload.readInt1()).thenReturn(MySQLNewParametersBoundFlag.PARAMETER_TYPE_EXIST.getValue()); when(payload.readInt4()).thenReturn(1); when(payload.getByteBuf().getIntLE(anyInt())).thenReturn(1); ServerPreparedStatementRegistry serverPreparedStatementRegistry = new ServerPreparedStatementRegistry(); when(connectionSession.getServerPreparedStatementRegistry()).thenReturn(serverPreparedStatementRegistry); SQLStatementContext sqlStatementContext = mock(SQLStatementContext.class); when(sqlStatementContext.getSqlStatement()).thenReturn(new MySQLSelectStatement()); serverPreparedStatementRegistry.addPreparedStatement(1, new MySQLServerPreparedStatement("select 1", sqlStatementContext, new HintValueContext(), Collections.emptyList())); assertThat(MySQLCommandPacketFactory.newInstance(MySQLCommandPacketType.COM_STMT_EXECUTE, payload, connectionSession), instanceOf(MySQLComStmtExecutePacket.class)); }
@VisibleForTesting
static int combinationCount(int arrayLength, int combinationLength)
{
    try {
        /*
         * combinationCount(n, k) = combinationCount(n-1, k-1) * n/k
         * (https://en.wikipedia.org/wiki/Combination#Number_of_k-combinations)
         * The formula is recursive. Here, instead of starting with k=combinationLength, n=arrayLength
         * and recursing, we start with k=0, n=(arrayLength-combinationLength) and proceed "bottom up".
         */
        int combinations = 1;
        for (int i = 1; i <= combinationLength; i++) {
            combinations = multiplyExact(combinations, arrayLength - combinationLength + i) / i;
        }
        return combinations;
    }
    catch (ArithmeticException e) {
        throw new PrestoException(INVALID_FUNCTION_ARGUMENT,
            format("Number of combinations too large for array of size %s and combination length %s", arrayLength, combinationLength));
    }
}
@Test public void testCombinationCount() { for (int n = 0; n < 5; n++) { for (int k = 0; k <= n; k++) { assertEquals(combinationCount(n, k), factorial(n) / factorial(n - k) / factorial(k)); } } assertEquals(combinationCount(42, 7), 26978328); assertEquals(combinationCount(100, 4), 3921225); }
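The exception branch is reachable with modest inputs: C(60, 30) is about 1.18e17, so an intermediate product overflows int, multiplyExact throws, and the helper rethrows as PrestoException. A sketch, assuming an assertThrows helper (JUnit 5 or TestNG 7+) is on the classpath:

@Test
public void testCombinationCountOverflow() {
    // an intermediate product overflows int long before C(60, 30) is reached
    assertThrows(PrestoException.class, () -> combinationCount(60, 30));
}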
@Override public AppResponse process(Flow flow, AppRequest body) { Map<String, String> registration = digidClient.pollLetter(appSession.getAccountId(), appSession.getRegistrationId(), flow.getName().equals(ReApplyActivateActivationCode.NAME)); if (registration.get(lowerUnderscore(GBA_STATUS)).equals("request")) { setValid(false); return new StatusResponse("PENDING"); } else if (registration.get(lowerUnderscore(GBA_STATUS)).equals("deceased")){ digidClient.remoteLog("559", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true)); return new NokResponse("gba_deceased"); } else if (GBA_EMIGATED_RNI.contains(registration.get(lowerUnderscore(GBA_STATUS)))) { digidClient.remoteLog("558", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true)); return new NokResponse("gba_emigrated_RNI"); } else if (registration.get(lowerUnderscore(GBA_STATUS)).equals("error")){ return new NokResponse("error"); } else if (!registration.get(lowerUnderscore(GBA_STATUS)).equals("valid_app_extension")){ digidClient.remoteLog("558", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(HIDDEN), true)); return new NokResponse("gba_invalid"); } digidClient.remoteLog("156", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), "device_name", appAuthenticator.getDeviceName(), lowerUnderscore(HIDDEN), true)); appAuthenticator.setRequestedAt(ZonedDateTime.now()); appAuthenticator.setIssuerType(registration.get(lowerUnderscore(ISSUER_TYPE))); digidClient.remoteLog("905", Map.of(lowerUnderscore(ACCOUNT_ID), appAuthenticator.getAccountId(), lowerUnderscore(APP_CODE), appAuthenticator.getAppCode(), lowerUnderscore(DEVICE_NAME), appAuthenticator.getDeviceName())); return new OkResponse(); }
@Test void processStatusRequest(){ when(digidClientMock.pollLetter(mockedAppSession.getAccountId(), mockedAppSession.getRegistrationId(), false)).thenReturn(gbaStatusResponseRequest); AppResponse appResponse = letterPolling.process(mockedFlow, mockedAbstractAppRequest); assertTrue(appResponse instanceof StatusResponse); assertEquals("PENDING", ((StatusResponse)appResponse).getStatus()); }
@Override public int hashCode() { final int prime = 31; int result = 1; result = prime * result + (this.connectionId == null ? 0 : this.connectionId.hashCode()); result = prime * result + (this.host == null ? 0 : this.host.hashCode()); result = prime * result + this.port; return result; }
@Test public void testEqual2() { ConnectionId id1 = new ConnectionId("host", 1202, "id"); ConnectionId id2 = new ConnectionId("host", 1202, "id"); assertEquals(id1.hashCode(), id2.hashCode(), "Hash code must be equal"); }
public static Map<String, AdvertisedListener> validateAndAnalysisAdvertisedListener(ServiceConfiguration config) {
    if (StringUtils.isBlank(config.getAdvertisedListeners())) {
        return Collections.emptyMap();
    }
    Optional<String> firstListenerName = Optional.empty();
    Map<String, List<String>> listeners = new LinkedHashMap<>();
    for (final String str : StringUtils.split(config.getAdvertisedListeners(), ",")) {
        int index = str.indexOf(":");
        if (index <= 0) {
            throw new IllegalArgumentException("the configuration entry `advertisedListeners` is invalid because `"
                    + str + "` does not contain a listener name");
        }
        String listenerName = StringUtils.trim(str.substring(0, index));
        if (!firstListenerName.isPresent()) {
            firstListenerName = Optional.of(listenerName);
        }
        String value = StringUtils.trim(str.substring(index + 1));
        listeners.computeIfAbsent(listenerName, k -> new ArrayList<>(2));
        listeners.get(listenerName).add(value);
    }
    if (StringUtils.isBlank(config.getInternalListenerName())) {
        config.setInternalListenerName(firstListenerName.get());
    }
    if (!listeners.containsKey(config.getInternalListenerName())) {
        throw new IllegalArgumentException("the `advertisedListeners` configuration does not contain the "
                + "`internalListenerName` entry");
    }
    final Map<String, AdvertisedListener> result = new LinkedHashMap<>();
    final Map<String, Set<String>> reverseMappings = new LinkedHashMap<>();
    for (final Map.Entry<String, List<String>> entry : listeners.entrySet()) {
        if (entry.getValue().size() > 2) {
            throw new IllegalArgumentException("there is a redundant configuration for listener `"
                    + entry.getKey() + "`");
        }
        URI pulsarAddress = null, pulsarSslAddress = null, pulsarHttpAddress = null, pulsarHttpsAddress = null;
        for (final String strUri : entry.getValue()) {
            try {
                URI uri = URI.create(strUri);
                if (StringUtils.equalsIgnoreCase(uri.getScheme(), "pulsar")) {
                    if (pulsarAddress == null) {
                        pulsarAddress = uri;
                    } else {
                        throw new IllegalArgumentException("there is a redundant configuration for listener `"
                                + entry.getKey() + "`");
                    }
                } else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "pulsar+ssl")) {
                    if (pulsarSslAddress == null) {
                        pulsarSslAddress = uri;
                    } else {
                        throw new IllegalArgumentException("there is a redundant configuration for listener `"
                                + entry.getKey() + "`");
                    }
                } else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "http")) {
                    if (pulsarHttpAddress == null) {
                        pulsarHttpAddress = uri;
                    } else {
                        throw new IllegalArgumentException("there is a redundant configuration for listener `"
                                + entry.getKey() + "`");
                    }
                } else if (StringUtils.equalsIgnoreCase(uri.getScheme(), "https")) {
                    if (pulsarHttpsAddress == null) {
                        pulsarHttpsAddress = uri;
                    } else {
                        throw new IllegalArgumentException("there is a redundant configuration for listener `"
                                + entry.getKey() + "`");
                    }
                }
                String hostPort = String.format("%s:%d", uri.getHost(), uri.getPort());
                Set<String> sets = reverseMappings.computeIfAbsent(hostPort, k -> new TreeSet<>());
                sets.add(entry.getKey());
                if (sets.size() > 1) {
                    throw new IllegalArgumentException("must not assign `" + hostPort
                            + "` to different listeners.");
                }
            } catch (Throwable cause) {
                throw new IllegalArgumentException("the value " + strUri + " in the `advertisedListeners` "
                        + "configuration is invalid", cause);
            }
        }
        result.put(entry.getKey(), AdvertisedListener.builder()
                .brokerServiceUrl(pulsarAddress)
                .brokerServiceUrlTls(pulsarSslAddress)
                .brokerHttpUrl(pulsarHttpAddress)
                .brokerHttpsUrl(pulsarHttpsAddress)
                .build());
    }
    return result;
}
@Test(expectedExceptions = IllegalArgumentException.class) public void testDifferentListenerWithSameHostPort() { ServiceConfiguration config = new ServiceConfiguration(); config.setAdvertisedListeners(" internal:pulsar://127.0.0.1:6660," + " external:pulsar://127.0.0.1:6660"); config.setInternalListenerName("internal"); MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config); }
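A passing counterpart makes the constraint clearer: distinct host:port pairs per listener parse cleanly. A sketch with hypothetical addresses, assuming the usual TestNG assertions:

@Test
public void testMultipleListenersWithDistinctHostPort() {
    ServiceConfiguration config = new ServiceConfiguration();
    config.setAdvertisedListeners(" internal:pulsar://192.168.1.11:6660,"
            + " external:pulsar://192.168.1.12:6660");
    config.setInternalListenerName("internal");
    Map<String, AdvertisedListener> listeners =
            MultipleListenerValidator.validateAndAnalysisAdvertisedListener(config);
    // each listener keeps its own host:port, so both entries survive validation
    assertEquals(listeners.size(), 2);
}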
@Override public JreInfoRestResponse getJreMetadata(String id) { return Optional.ofNullable(metadata.get(id)) .orElseThrow(() -> new NotFoundException("JRE not found for id: " + id)); }
@Test void getJreMetadata_shouldFail_whenJreNotFound() { assertThatThrownBy(() -> jresHandler.getJreMetadata("4")) .isInstanceOf(NotFoundException.class) .hasMessage("JRE not found for id: 4"); }
public int computeThreshold(StreamConfig streamConfig, CommittingSegmentDescriptor committingSegmentDescriptor,
    @Nullable SegmentZKMetadata committingSegmentZKMetadata, String newSegmentName) {
  long desiredSegmentSizeBytes = streamConfig.getFlushThresholdSegmentSizeBytes();
  if (desiredSegmentSizeBytes <= 0) {
    desiredSegmentSizeBytes = StreamConfig.DEFAULT_FLUSH_THRESHOLD_SEGMENT_SIZE_BYTES;
  }
  long optimalSegmentSizeBytesMin = desiredSegmentSizeBytes / 2;
  double optimalSegmentSizeBytesMax = desiredSegmentSizeBytes * 1.5;

  if (committingSegmentZKMetadata == null) {
    // first segment of the partition, hence committing segment is null
    if (_latestSegmentRowsToSizeRatio > 0) {
      // new partition group added case
      long targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
      targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, using prev ratio {}, setting rows threshold for {} as {}",
          _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
      return (int) targetSegmentNumRows;
    } else {
      final int autotuneInitialRows = streamConfig.getFlushAutotuneInitialRows();
      SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
          "Committing segment zk metadata is not available, setting threshold for {} as {}",
          newSegmentName, autotuneInitialRows);
      return autotuneInitialRows;
    }
  }

  final long committingSegmentSizeBytes = committingSegmentDescriptor.getSegmentSizeBytes();
  if (committingSegmentSizeBytes <= 0 // repair segment case
      || SegmentCompletionProtocol.REASON_FORCE_COMMIT_MESSAGE_RECEIVED.equals(
          committingSegmentDescriptor.getStopReason())) {
    String reason = committingSegmentSizeBytes <= 0 //
        ? "Committing segment size is not available" //
        : "Committing segment is due to force-commit";
    final int targetNumRows = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info("{}, setting thresholds from previous segment for {} as {}",
        reason, newSegmentName, targetNumRows);
    return targetNumRows;
  }

  final long timeConsumed = _clock.millis() - committingSegmentZKMetadata.getCreationTime();
  final long numRowsConsumed = committingSegmentZKMetadata.getTotalDocs();
  final int numRowsThreshold = committingSegmentZKMetadata.getSizeThresholdToFlushSegment();
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "{}: Data from committing segment: Time {} numRows {} threshold {} segmentSize(bytes) {}",
      newSegmentName, TimeUtils.convertMillisToPeriod(timeConsumed), numRowsConsumed, numRowsThreshold,
      committingSegmentSizeBytes);

  double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes;
  if (_latestSegmentRowsToSizeRatio > 0) {
    _latestSegmentRowsToSizeRatio = CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio
        + PREVIOUS_SEGMENT_RATIO_WEIGHT * _latestSegmentRowsToSizeRatio;
  } else {
    _latestSegmentRowsToSizeRatio = currentRatio;
  }

  // If the number of rows consumed is less than what we set as target in metadata, then the segment hit time limit.
  // We can set the new target to be slightly higher than the actual number of rows consumed so that we can aim
  // to hit the row limit next time around.
  //
  // If the size of the committing segment is higher than the desired segment size, then the administrator has
  // set a lower segment size threshold. We should treat this case as if we have hit the row limit and not the time
  // limit.
  //
  // TODO: add feature to adjust the time threshold as well.
  //
  // If we set the new threshold to numRowsConsumed, we might keep oscillating back and forth between doubling the
  // limit and hitting the time threshold. If we set the new threshold to
  // committingSegmentZKMetadata.getSizeThresholdToFlushSegment(), we might end up using a lot more memory than
  // required for the segment. Using a minor bump strategy until we add the feature to adjust time, we will only
  // slightly bump the threshold based on numRowsConsumed.
  if (numRowsConsumed < numRowsThreshold && committingSegmentSizeBytes < desiredSegmentSizeBytes) {
    final long timeThresholdMillis = streamConfig.getFlushThresholdTimeMillis();
    long currentNumRows = numRowsConsumed;
    StringBuilder logStringBuilder = new StringBuilder().append("Time threshold reached. ");
    if (timeThresholdMillis < timeConsumed) {
      // The administrator has reduced the time threshold. Adjust the
      // number of rows to match the average consumption rate on the partition.
      currentNumRows = timeThresholdMillis * numRowsConsumed / timeConsumed;
      logStringBuilder.append(" Detected lower time threshold, adjusting numRowsConsumed to ").append(currentNumRows)
          .append(". ");
    }
    long targetSegmentNumRows = (long) (currentNumRows * ROWS_MULTIPLIER_WHEN_TIME_THRESHOLD_HIT);
    targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
    logStringBuilder.append("Setting segment size for {} as {}");
    SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(logStringBuilder.toString(), newSegmentName,
        targetSegmentNumRows);
    return (int) targetSegmentNumRows;
  }

  long targetSegmentNumRows;
  if (committingSegmentSizeBytes < optimalSegmentSizeBytesMin) {
    targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2;
  } else if (committingSegmentSizeBytes > optimalSegmentSizeBytesMax) {
    targetSegmentNumRows = numRowsConsumed / 2;
  } else {
    if (_latestSegmentRowsToSizeRatio > 0) {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * _latestSegmentRowsToSizeRatio);
    } else {
      targetSegmentNumRows = (long) (desiredSegmentSizeBytes * currentRatio);
    }
  }
  targetSegmentNumRows = capNumRowsIfOverflow(targetSegmentNumRows);
  SegmentSizeBasedFlushThresholdUpdater.LOGGER.info(
      "Committing segment size {}, current ratio {}, setting threshold for {} as {}",
      committingSegmentSizeBytes, _latestSegmentRowsToSizeRatio, newSegmentName, targetSegmentNumRows);
  return (int) targetSegmentNumRows;
}
@Test
public void testUseLastSegmentsThresholdIfSegmentIsCommittingDueToForceCommit() {
  long committingSegmentSizeBytes = 500_000L;
  int committingSegmentSizeThreshold = 25_000;

  SegmentFlushThresholdComputer computer = new SegmentFlushThresholdComputer();

  CommittingSegmentDescriptor committingSegmentDescriptor = mock(CommittingSegmentDescriptor.class);
  when(committingSegmentDescriptor.getSegmentSizeBytes()).thenReturn(committingSegmentSizeBytes);
  when(committingSegmentDescriptor.getStopReason()).thenReturn(REASON_FORCE_COMMIT_MESSAGE_RECEIVED);

  SegmentZKMetadata committingSegmentZKMetadata = mock(SegmentZKMetadata.class);
  when(committingSegmentZKMetadata.getSizeThresholdToFlushSegment()).thenReturn(committingSegmentSizeThreshold);

  StreamConfig streamConfig = mock(StreamConfig.class);

  int newSegmentSizeThreshold = computer.computeThreshold(streamConfig, committingSegmentDescriptor,
      committingSegmentZKMetadata, "newSegmentName");

  assertEquals(newSegmentSizeThreshold, committingSegmentSizeThreshold);
}
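// Illustrative sketch (not part of the source): a minimal standalone walk-through of the
// rows-to-size ratio smoothing used by computeThreshold above. The 0.1/0.9 weights stand in for
// the CURRENT_SEGMENT_RATIO_WEIGHT / PREVIOUS_SEGMENT_RATIO_WEIGHT constants, whose values are
// not shown in this section, so treat them as assumptions.
public class RatioSmoothingSketch {
  static final double CURRENT_SEGMENT_RATIO_WEIGHT = 0.1;  // assumed value
  static final double PREVIOUS_SEGMENT_RATIO_WEIGHT = 0.9; // assumed value

  public static void main(String[] args) {
    long desiredSegmentSizeBytes = 500_000_000L;    // assumed 500 MB target
    long numRowsConsumed = 1_000_000L;              // observed on the committing segment
    long committingSegmentSizeBytes = 200_000_000L; // observed segment size

    // rows-per-byte ratio of the committing segment
    double currentRatio = (double) numRowsConsumed / committingSegmentSizeBytes; // 0.005

    // exponentially weighted smoothing against the previous ratio
    double previousRatio = 0.004;
    double smoothed = CURRENT_SEGMENT_RATIO_WEIGHT * currentRatio
        + PREVIOUS_SEGMENT_RATIO_WEIGHT * previousRatio; // 0.0041

    // 200 MB is below desiredSegmentSizeBytes / 2, so the method grows the row target by 50%
    long targetSegmentNumRows = numRowsConsumed + numRowsConsumed / 2; // 1_500_000
    System.out.println("smoothed ratio = " + smoothed + ", next threshold = " + targetSegmentNumRows);
  }
}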
public static long computeStartOfNextMinute(long now) {
    Calendar cal = Calendar.getInstance();
    cal.setTime(new Date(now));
    cal.set(Calendar.MILLISECOND, 0);
    cal.set(Calendar.SECOND, 0);
    cal.add(Calendar.MINUTE, 1);
    return cal.getTime().getTime();
}
@Test
public void testMinute() {
    // Mon Nov 20 18:05:17,522 CET 2006
    long now = 1164042317522L;
    // Mon Nov 20 18:06:00 CET 2006
    long expected = 1164042360000L;
    long computed = TimeUtil.computeStartOfNextMinute(now);
    Assertions.assertEquals(expected - now, 1000 * 42 + 478);
    Assertions.assertEquals(expected, computed);
}
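// Illustrative alternative (not from the source): the same start-of-next-minute computation
// written with java.time instead of Calendar. For zones whose UTC offset is a whole number of
// minutes this matches the Calendar-based version above; the class name is hypothetical.
import java.time.Instant;
import java.time.temporal.ChronoUnit;

public class NextMinuteSketch {
    public static long computeStartOfNextMinute(long now) {
        return Instant.ofEpochMilli(now)
                .truncatedTo(ChronoUnit.MINUTES) // drop seconds and milliseconds
                .plus(1, ChronoUnit.MINUTES)     // advance to the next minute boundary
                .toEpochMilli();
    }

    public static void main(String[] args) {
        System.out.println(computeStartOfNextMinute(1164042317522L)); // prints 1164042360000
    }
}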
@Override
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
    int nextValue = nextValue(topic);
    List<PartitionInfo> availablePartitions = cluster.availablePartitionsForTopic(topic);
    if (!availablePartitions.isEmpty()) {
        int part = Utils.toPositive(nextValue) % availablePartitions.size();
        return availablePartitions.get(part).partition();
    } else {
        // no partitions are available, give a non-available partition
        int numPartitions = cluster.partitionsForTopic(topic).size();
        return Utils.toPositive(nextValue) % numPartitions;
    }
}
@Test
public void testRoundRobinWithNullKeyBytes() {
    final String topicA = "topicA";
    final String topicB = "topicB";

    List<PartitionInfo> allPartitions = asList(
            new PartitionInfo(topicA, 0, NODES[0], NODES, NODES),
            new PartitionInfo(topicA, 1, NODES[1], NODES, NODES),
            new PartitionInfo(topicA, 2, NODES[2], NODES, NODES),
            new PartitionInfo(topicB, 0, NODES[0], NODES, NODES));
    Cluster testCluster = new Cluster("clusterId", asList(NODES[0], NODES[1], NODES[2]), allPartitions,
            Collections.emptySet(), Collections.emptySet());

    final Map<Integer, Integer> partitionCount = new HashMap<>();

    Partitioner partitioner = new RoundRobinPartitioner();
    for (int i = 0; i < 30; ++i) {
        int partition = partitioner.partition(topicA, null, null, null, null, testCluster);
        Integer count = partitionCount.get(partition);
        if (null == count) {
            count = 0;
        }
        partitionCount.put(partition, count + 1);
        if (i % 5 == 0) {
            partitioner.partition(topicB, null, null, null, null, testCluster);
        }
    }

    assertEquals(10, partitionCount.get(0).intValue());
    assertEquals(10, partitionCount.get(1).intValue());
    assertEquals(10, partitionCount.get(2).intValue());
}
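// Illustrative usage sketch (not from the source): wiring RoundRobinPartitioner into a producer
// via the standard partitioner.class setting. The broker address and topic are placeholders.
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RoundRobinPartitioner;
import org.apache.kafka.common.serialization.StringSerializer;

public class RoundRobinProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Records now cycle across partitions regardless of key, as exercised by the test above.
        props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, RoundRobinPartitioner.class.getName());
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
            producer.send(new ProducerRecord<>("topicA", "value")); // placeholder topic
        }
    }
}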
@VisibleForTesting
static StreamExecutionEnvironment createStreamExecutionEnvironment(FlinkPipelineOptions options) {
    return createStreamExecutionEnvironment(
            options,
            MoreObjects.firstNonNull(options.getFilesToStage(), Collections.emptyList()),
            options.getFlinkConfDir());
}
@Test
public void shouldAcceptExplicitlySetIdleSourcesFlagWithoutCheckpointing() {
    // Checkpointing disabled, accept flag
    FlinkPipelineOptions options = getDefaultPipelineOptions();
    options.setShutdownSourcesAfterIdleMs(42L);
    FlinkExecutionEnvironments.createStreamExecutionEnvironment(options);
    assertThat(options.getShutdownSourcesAfterIdleMs(), is(42L));
}
@Override
public Collection<LocalDataQueryResultRow> getRows(final ExportStorageNodesStatement sqlStatement,
                                                   final ContextManager contextManager) {
    checkSQLStatement(contextManager.getMetaDataContexts().getMetaData(), sqlStatement);
    String exportedData = generateExportData(contextManager.getMetaDataContexts().getMetaData(), sqlStatement);
    if (sqlStatement.getFilePath().isPresent()) {
        String filePath = sqlStatement.getFilePath().get();
        ExportUtils.exportToFile(filePath, exportedData);
        return Collections.singleton(new LocalDataQueryResultRow(
                contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId(),
                LocalDateTime.now(), String.format("Successfully exported to:'%s'", filePath)));
    }
    return Collections.singleton(new LocalDataQueryResultRow(
            contextManager.getComputeNodeInstanceContext().getInstance().getMetaData().getId(),
            LocalDateTime.now(), exportedData));
}
@Test
void assertExecuteWithDatabaseName() {
    when(database.getName()).thenReturn("normal_db");
    Map<String, StorageUnit> storageUnits = createStorageUnits();
    when(database.getResourceMetaData().getStorageUnits()).thenReturn(storageUnits);
    when(database.getRuleMetaData().getConfigurations()).thenReturn(Collections.singleton(createShardingRuleConfiguration()));
    ContextManager contextManager = mockContextManager();
    when(ProxyContext.getInstance().getContextManager()).thenReturn(contextManager);
    Collection<LocalDataQueryResultRow> actual =
            new ExportStorageNodesExecutor().getRows(new ExportStorageNodesStatement("normal_db", null), contextManager);
    assertThat(actual.size(), is(1));
    LocalDataQueryResultRow row = actual.iterator().next();
    assertThat(row.getCell(3), is(loadExpectedRow()));
}
public FEELFnResult<TemporalAmount> invoke(@ParameterName("from") Temporal from, @ParameterName("to") Temporal to) {
    if (from == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "from", "cannot be null"));
    }
    if (to == null) {
        return FEELFnResult.ofError(new InvalidParametersEvent(Severity.ERROR, "to", "cannot be null"));
    }
    final LocalDate fromDate = getLocalDateFromTemporal(from);
    if (fromDate == null) {
        return FEELFnResult.ofError(
                new InvalidParametersEvent(Severity.ERROR, "from", "is of type not suitable for years and months function"));
    }
    final LocalDate toDate = getLocalDateFromTemporal(to);
    if (toDate == null) {
        return FEELFnResult.ofError(
                new InvalidParametersEvent(Severity.ERROR, "to", "is of type not suitable for years and months function"));
    }
    return FEELFnResult.ofResult(new ComparablePeriod(Period.between(fromDate, toDate).withDays(0)));
}
@Test
void invokeYearLocalDate() {
    FunctionTestUtil.assertResult(
            yamFunction.invoke(LocalDate.of(2017, 6, 12), Year.of(2020)),
            ComparablePeriod.of(2, 6, 0));
}
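// Illustrative sketch (not from the source): the core of the years-and-months result above is
// plain JDK Period arithmetic with the day component zeroed. Using 2020-01-01 for Year.of(2020)
// is an assumption about how getLocalDateFromTemporal coerces a Year.
import java.time.LocalDate;
import java.time.Period;

public class YearsMonthsSketch {
    public static void main(String[] args) {
        Period p = Period.between(LocalDate.of(2017, 6, 12), LocalDate.of(2020, 1, 1)).withDays(0);
        System.out.println(p); // P2Y6M, matching ComparablePeriod.of(2, 6, 0) in the test above
    }
}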
@Override
public int compareTo(DateTimeStamp dateTimeStamp) {
    return comparator.compare(this, dateTimeStamp);
}
@Test
void testCompareEqualsTimeStamp() {
    DateTimeStamp object1 = new DateTimeStamp("2018-04-04T10:10:00.586-0100", 123);
    DateTimeStamp object2 = new DateTimeStamp("2018-04-04T10:10:00.586-0100", 123);
    assertEquals(0, object2.compareTo(object1));
}
public static Sensor processRateSensor(final String threadId, final StreamsMetricsImpl streamsMetrics) {
    final Sensor sensor = streamsMetrics.threadLevelSensor(threadId, PROCESS + RATE_SUFFIX, RecordingLevel.INFO);
    final Map<String, String> tagMap = streamsMetrics.threadLevelTagMap(threadId);
    addRateOfSumAndSumMetricsToSensor(
        sensor,
        THREAD_LEVEL_GROUP,
        tagMap,
        PROCESS,
        PROCESS_RATE_DESCRIPTION,
        PROCESS_TOTAL_DESCRIPTION
    );
    return sensor;
}
@Test
public void shouldGetProcessRateSensor() {
    final String operation = "process";
    final String operationRate = "process" + RATE_SUFFIX;
    final String totalDescription = "The total number of calls to process";
    final String rateDescription = "The average per-second number of calls to process";
    when(streamsMetrics.threadLevelSensor(THREAD_ID, operationRate, RecordingLevel.INFO)).thenReturn(expectedSensor);
    when(streamsMetrics.threadLevelTagMap(THREAD_ID)).thenReturn(tagMap);

    try (final MockedStatic<StreamsMetricsImpl> streamsMetricsStaticMock = mockStatic(StreamsMetricsImpl.class)) {
        final Sensor sensor = ThreadMetrics.processRateSensor(THREAD_ID, streamsMetrics);
        streamsMetricsStaticMock.verify(
            () -> StreamsMetricsImpl.addRateOfSumAndSumMetricsToSensor(
                expectedSensor,
                THREAD_LEVEL_GROUP,
                tagMap,
                operation,
                rateDescription,
                totalDescription
            )
        );
        assertThat(sensor, is(expectedSensor));
    }
}
@Override
public void shutdown(Callback<None> callback) {
    _client.shutdown(callback);
}
@Test
public void testShutdown() {
    Client client = EasyMock.createMock(Client.class);

    @SuppressWarnings("unchecked")
    Callback<None> callback = EasyMock.createMock(Callback.class);
    Capture<Callback<None>> callbackCapture = EasyMock.newCapture();

    // Underlying client's shutdown should be invoked with correct callback
    client.shutdown(EasyMock.capture(callbackCapture));
    EasyMock.replay(client);

    // No methods should be invoked on the callback
    EasyMock.replay(callback);

    RestClient restClient = new RestClient(client, "d2://");
    restClient.shutdown(callback);

    EasyMock.verify(client);
    EasyMock.verify(callback);

    EasyMock.reset(callback);
    None none = None.none();
    callback.onSuccess(none);
    EasyMock.replay(callback);

    Callback<None> captured = callbackCapture.getValue();
    captured.onSuccess(none);

    EasyMock.verify(callback);
}
@VisibleForTesting
static Comparator<ActualProperties> streamingExecutionPreference(PreferredProperties preferred) {
    // Calculating the matches can be a bit expensive, so cache the results between comparisons
    LoadingCache<List<LocalProperty<VariableReferenceExpression>>, List<Optional<LocalProperty<VariableReferenceExpression>>>> matchCache =
            CacheBuilder.newBuilder()
                    .build(CacheLoader.from(actualProperties -> LocalProperties.match(actualProperties, preferred.getLocalProperties())));

    return (actual1, actual2) -> {
        List<Optional<LocalProperty<VariableReferenceExpression>>> matchLayout1 = matchCache.getUnchecked(actual1.getLocalProperties());
        List<Optional<LocalProperty<VariableReferenceExpression>>> matchLayout2 = matchCache.getUnchecked(actual2.getLocalProperties());

        return ComparisonChain.start()
                .compareTrueFirst(hasLocalOptimization(preferred.getLocalProperties(), matchLayout1), hasLocalOptimization(preferred.getLocalProperties(), matchLayout2))
                .compareTrueFirst(meetsPartitioningRequirements(preferred, actual1), meetsPartitioningRequirements(preferred, actual2))
                .compare(matchLayout1, matchLayout2, matchedLayoutPreference())
                .result();
    };
}
@Test
public void testPickLayoutGroupedWithSort() {
    Comparator<ActualProperties> preference =
            streamingExecutionPreference(PreferredProperties.local(ImmutableList.of(grouped("a"), sorted("b", ASC_NULLS_FIRST))));

    List<ActualProperties> input = ImmutableList.<ActualProperties>builder()
            .add(builder()
                    .global(streamPartitionedOn("a"))
                    .build())
            .add(builder()
                    .global(singleStreamPartition())
                    .build())
            .add(builder()
                    .global(arbitraryPartition())
                    .local(ImmutableList.of(grouped("a", "b")))
                    .build())
            .add(builder()
                    .global(arbitraryPartition())
                    .build())
            .add(builder()
                    .global(hashDistributedOn("a"))
                    .build())
            .add(builder()
                    .global(singleStream())
                    .local(ImmutableList.of(constant("a"), sorted("b", ASC_NULLS_FIRST)))
                    .build())
            .add(builder()
                    .global(singleStreamPartition())
                    .local(ImmutableList.of(sorted("a", ASC_NULLS_FIRST)))
                    .build())
            .add(builder()
                    .global(hashDistributedOn("a"))
                    .build())
            .build();

    List<ActualProperties> expected = ImmutableList.<ActualProperties>builder()
            .add(builder()
                    .global(singleStream())
                    .local(ImmutableList.of(constant("a"), sorted("b", ASC_NULLS_FIRST)))
                    .build())
            .add(builder()
                    .global(singleStreamPartition())
                    .local(ImmutableList.of(sorted("a", ASC_NULLS_FIRST)))
                    .build())
            .add(builder()
                    .global(streamPartitionedOn("a"))
                    .build())
            .add(builder()
                    .global(singleStreamPartition())
                    .build())
            .add(builder()
                    .global(arbitraryPartition())
                    .local(ImmutableList.of(grouped("a", "b")))
                    .build())
            .add(builder()
                    .global(arbitraryPartition())
                    .build())
            .add(builder()
                    .global(hashDistributedOn("a"))
                    .build())
            .add(builder()
                    .global(hashDistributedOn("a"))
                    .build())
            .build();

    assertEquals(stableSort(input, preference), expected);
}
@Override
public DescriptiveUrlBag toUrl(final Path file) {
    return new DescriptiveUrlBag(Collections.singletonList(
            new DescriptiveUrl(URI.create(String.format("https://webmail.freenet.de/web/?goTo=share&path=/%s#cloud",
                    URIEncoder.encode(PathRelativizer.relativize(PathNormalizer.normalize(host.getDefaultPath(), true),
                            file.isFile() ? file.getParent().getAbsolute() : file.getAbsolute())))),
                    DescriptiveUrl.Type.http)
    ));
}
@Test
public void testToUrlRoot() {
    final FreenetUrlProvider provider =
            new FreenetUrlProvider(new Host(new FreenetProtocol(), "dav.freenet.de", 443, "/webdav"));
    final DescriptiveUrlBag urls = provider.toUrl(new Path("/webdav", EnumSet.of(Path.Type.directory)));
    assertEquals(1, urls.size());
    final DescriptiveUrl url = urls.find(DescriptiveUrl.Type.http);
    assertNotEquals(DescriptiveUrl.EMPTY, url);
    assertEquals(DescriptiveUrl.Type.http, url.getType());
    assertEquals("https://webmail.freenet.de/web/?goTo=share&path=/#cloud", url.getUrl());
}
@Override
public Iterable<FileSystem<?>> fromOptions(@Nonnull PipelineOptions options) {
    final List<Configuration> configurations =
            options.as(HadoopFileSystemOptions.class).getHdfsConfiguration();
    if (configurations == null) {
        // nothing to register
        return Collections.emptyList();
    }
    checkArgument(
            configurations.size() == 1,
            String.format(
                    "The %s currently only supports at most a single Hadoop configuration.",
                    HadoopFileSystemRegistrar.class.getSimpleName()));

    final ImmutableList.Builder<FileSystem<?>> builder = ImmutableList.builder();
    final Set<String> registeredSchemes = new HashSet<>();

    // exactly one configuration at this point (checked above)
    final Configuration configuration = Iterables.getOnlyElement(configurations);
    final String defaultFs = configuration.get(org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY);
    if (defaultFs != null && !defaultFs.isEmpty()) {
        final String scheme =
                Objects.requireNonNull(
                        URI.create(defaultFs).getScheme(),
                        String.format(
                                "Empty scheme for %s value.",
                                org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY));
        builder.add(new HadoopFileSystem(scheme, configuration));
        registeredSchemes.add(scheme);
    }
    final String nameServices = configuration.get(CONFIG_KEY_DFS_NAMESERVICES);
    if (nameServices != null && !nameServices.isEmpty()) {
        // register the schemes supported by the HA cluster
        for (String scheme : HA_SCHEMES) {
            if (!registeredSchemes.contains(scheme)) {
                builder.add(new HadoopFileSystem(scheme, configuration));
            }
        }
    }
    return builder.build();
}
@Test
public void testServiceLoader() {
    HadoopFileSystemOptions options = PipelineOptionsFactory.as(HadoopFileSystemOptions.class);
    options.setHdfsConfiguration(ImmutableList.of(configuration));
    for (FileSystemRegistrar registrar :
            Lists.newArrayList(ServiceLoader.load(FileSystemRegistrar.class).iterator())) {
        if (registrar instanceof HadoopFileSystemRegistrar) {
            Iterable<FileSystem<?>> fileSystems = registrar.fromOptions(options);
            assertEquals(
                    hdfsClusterBaseUri.getScheme(),
                    ((HadoopFileSystem) Iterables.getOnlyElement(fileSystems)).getScheme());
            return;
        }
    }
    fail("Expected to find " + HadoopFileSystemRegistrar.class);
}
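// Illustrative usage sketch (not from the source): the registrar above keys off fs.defaultFS
// (FileSystem.FS_DEFAULT_NAME_KEY), so a pipeline typically hands it a Configuration like this.
// The namenode address is a placeholder.
import org.apache.hadoop.conf.Configuration;

public class HdfsConfigurationSketch {
    public static Configuration hdfsConfiguration() {
        Configuration conf = new Configuration(false); // skip loading default resources
        conf.set("fs.defaultFS", "hdfs://namenode:8020"); // placeholder; the "hdfs" scheme gets registered
        return conf;
    }
}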
public Options getPaimonOptions() {
    return this.paimonOptions;
}
@Test
public void testCreatePaimonConnectorWithOSS() {
    Map<String, String> properties = new HashMap<>();
    properties.put("paimon.catalog.warehouse", "oss://bucket/warehouse");
    properties.put("paimon.catalog.type", "filesystem");
    String accessKeyValue = "oss_access_key";
    String secretKeyValue = "oss_secret_key";
    String endpointValue = "oss_endpoint";
    properties.put("aliyun.oss.access_key", accessKeyValue);
    properties.put("aliyun.oss.secret_key", secretKeyValue);
    properties.put("aliyun.oss.endpoint", endpointValue);
    PaimonConnector connector = new PaimonConnector(new ConnectorContext("paimon_catalog", "paimon", properties));
    Options paimonOptions = connector.getPaimonOptions();
    String accessKeyOption = paimonOptions.get("fs.oss.accessKeyId");
    String secretKeyOption = paimonOptions.get("fs.oss.accessKeySecret");
    String endpointOption = paimonOptions.get("fs.oss.endpoint");
    Assert.assertEquals(accessKeyOption, accessKeyValue);
    Assert.assertEquals(secretKeyOption, secretKeyValue);
    Assert.assertEquals(endpointOption, endpointValue);
}
@Override
public boolean remove(Object o) {
    checkNotNull(o, "Item to be removed cannot be null.");
    return items.remove(serializer.encode(o));
}
@Test
public void testRemove() throws Exception {
    // Test removal
    fillSet(10, set);
    for (int i = 0; i < 10; i++) {
        assertEquals("The size of the set is wrong.", 10 - i, set.size());
        assertTrue("The first removal should be true.", set.remove(i));
        assertFalse("The second removal should be false (item no longer contained).", set.remove(i));
    }
    assertTrue("All elements should have been removed.", set.isEmpty());
}
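// Illustrative sketch (not the source implementation): remove() above encodes the item with the
// set's serializer and delegates to the backing collection, so membership is decided on the
// encoded form. This stand-in uses String encoding in place of the real serializer.
import java.util.HashSet;
import java.util.Objects;
import java.util.Set;

public class SerializedSetSketch<E> {
    private final Set<String> items = new HashSet<>(); // stand-in for encoded storage

    private String encode(Object o) {
        return String.valueOf(o); // stand-in serializer
    }

    public boolean add(E e) {
        return items.add(encode(Objects.requireNonNull(e)));
    }

    public boolean remove(Object o) {
        Objects.requireNonNull(o, "Item to be removed cannot be null.");
        return items.remove(encode(o));
    }

    public static void main(String[] args) {
        SerializedSetSketch<Integer> set = new SerializedSetSketch<>();
        set.add(7);
        System.out.println(set.remove(7)); // true
        System.out.println(set.remove(7)); // false: already removed, mirroring the test above
    }
}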
public ScaledView withUnitsPrecise(final String name, final ScaleUnits scaleUnits) {
    return new ScaledView(name, this::getElapsedNanos, scaleUnits.nanoRelativeDecimalShift);
}
@Test
public void withUnitsPrecise() {
    final ManualAdvanceClock clock = new ManualAdvanceClock(Instant.now());
    final UptimeMetric uptimeMetric = new UptimeMetric("up_millis", clock::nanoTime);
    clock.advance(Duration.ofNanos(123_456_789_987L)); // 123.xx seconds

    // set-up: ensure advancing nanos reflects in our milli-based uptime
    assertEquals(Long.valueOf(123_456L), uptimeMetric.getValue());

    final UptimeMetric.ScaledView secondsBasedView =
            uptimeMetric.withUnitsPrecise("up_seconds", UptimeMetric.ScaleUnits.SECONDS);
    final UptimeMetric.ScaledView millisecondsBasedView =
            uptimeMetric.withUnitsPrecise("up_millis", UptimeMetric.ScaleUnits.MILLISECONDS);
    final UptimeMetric.ScaledView microsecondsBasedView =
            uptimeMetric.withUnitsPrecise("up_micros", UptimeMetric.ScaleUnits.MICROSECONDS);
    final UptimeMetric.ScaledView nanosecondsBasedView =
            uptimeMetric.withUnitsPrecise("up_nanos", UptimeMetric.ScaleUnits.NANOSECONDS);

    assertEquals(new BigDecimal("123.456789987"), secondsBasedView.getValue());
    assertEquals(new BigDecimal("123456.789987"), millisecondsBasedView.getValue());
    assertEquals(new BigDecimal("123456789.987"), microsecondsBasedView.getValue());
    assertEquals(new BigDecimal("123456789987"), nanosecondsBasedView.getValue());

    clock.advance(Duration.ofMillis(1_999));

    assertEquals(Long.valueOf(125_455L), uptimeMetric.getValue());
    assertEquals(new BigDecimal("125.455789987"), secondsBasedView.getValue());
    assertEquals(new BigDecimal("125455.789987"), millisecondsBasedView.getValue());
    assertEquals(new BigDecimal("125455789.987"), microsecondsBasedView.getValue());
    assertEquals(new BigDecimal("125455789987"), nanosecondsBasedView.getValue());
}
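// Illustrative sketch (not from the source): the precise scaled views above amount to shifting
// the decimal point of the raw nanosecond count; BigDecimal.movePointLeft keeps full precision.
// The values mirror the test above.
import java.math.BigDecimal;

public class DecimalShiftSketch {
    public static void main(String[] args) {
        BigDecimal nanos = new BigDecimal(123_456_789_987L);
        System.out.println(nanos.movePointLeft(9)); // 123.456789987  (seconds)
        System.out.println(nanos.movePointLeft(6)); // 123456.789987  (milliseconds)
        System.out.println(nanos.movePointLeft(3)); // 123456789.987  (microseconds)
    }
}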
public static boolean satisfiesRequires(String version, String requires) {
    String requiresVersion = StringUtils.trim(requires);

    // an exact version x.y.z will implicitly mean the same as >=x.y.z
    if (requiresVersion.matches("^\\d+\\.\\d+\\.\\d+$")) {
        // If exact versions are not allowed in requires, rewrite to >= expression
        requiresVersion = ">=" + requiresVersion;
    }
    return version.equals("0.0.0") || checkVersionConstraint(version, requiresVersion);
}
@Test
void satisfiesRequires() {
    // match all requires
    String systemVersion = "0.0.0";
    String requires = ">=2.2.0";
    boolean result = VersionUtils.satisfiesRequires(systemVersion, requires);
    assertThat(result).isTrue();

    systemVersion = "2.0.0";
    requires = "*";
    result = VersionUtils.satisfiesRequires(systemVersion, requires);
    assertThat(result).isTrue();

    systemVersion = "2.0.0";
    requires = "";
    result = VersionUtils.satisfiesRequires(systemVersion, requires);
    assertThat(result).isTrue();

    // match exact version
    systemVersion = "2.0.0";
    requires = ">=2.0.0";
    result = VersionUtils.satisfiesRequires(systemVersion, requires);
    assertThat(result).isTrue();

    systemVersion = "2.0.0";
    requires = ">2.0.0";
    result = VersionUtils.satisfiesRequires(systemVersion, requires);
    assertThat(result).isFalse();

    // an exact version x.y.z will implicitly mean the same as >=x.y.z
    systemVersion = "2.1.0";
    requires = "2.0.0"; // means >=2.0.0
    result = VersionUtils.satisfiesRequires(systemVersion, requires);
    assertThat(result).isTrue();
}
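// Illustrative usage sketch (not from the source): a typical load-time guard built on
// satisfiesRequires, assuming the VersionUtils class above is on the classpath. The version
// strings and the sketch class name are placeholders.
public class RequiresGuardSketch {
    public static void main(String[] args) {
        String systemVersion = "2.1.0";
        String pluginRequires = "2.0.0"; // an exact version is treated as >=2.0.0
        if (!VersionUtils.satisfiesRequires(systemVersion, pluginRequires)) {
            throw new IllegalStateException("Plugin requires system version " + pluginRequires);
        }
        System.out.println("Plugin accepted on system " + systemVersion);
    }
}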
public static Level toLevel(String sArg) {
    return toLevel(sArg, Level.DEBUG);
}
@Test
public void withSpaceAround() {
    assertEquals(Level.INFO, Level.toLevel(" INFO "));
}
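// Illustrative sketch (not from the source): the one-arg toLevel above delegates with Level.DEBUG
// as the default, so an unparseable string is assumed to fall back to DEBUG; the exact fallback
// behavior lives in the two-arg overload, which is not shown in this section.
public class ToLevelDefaultSketch {
    public static void main(String[] args) {
        System.out.println(Level.toLevel(" INFO "));      // INFO, per the test above
        System.out.println(Level.toLevel("not-a-level")); // assumed DEBUG via the default argument
    }
}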
public RunResponse restartDirectly(
    RunResponse restartStepInfo, RunRequest runRequest, boolean blocking) {
  WorkflowInstance instance = restartStepInfo.getInstance();
  String stepId = restartStepInfo.getStepId();
  validateStepId(instance, stepId, Actions.StepInstanceAction.RESTART);
  StepInstance stepInstance = getStepInstanceAndValidate(instance, stepId, runRequest.getRestartConfig());
  // prepare the action payload, then persist it to the db
  StepAction stepAction = StepAction.createRestart(stepInstance, runRequest);
  saveAction(stepInstance, stepAction);
  if (blocking) {
    return waitResponseWithTimeout(stepInstance, stepAction);
  } else {
    return RunResponse.from(stepInstance, stepAction.toTimelineEvent());
  }
}
@Test
public void testRestartDirectlyWhileForeachStepRunning() {
  stepInstance.getRuntimeState().setStatus(StepInstance.Status.RUNNING);
  ((TypedStep) stepInstance.getDefinition()).setType(StepType.FOREACH);
  ForeachArtifact artifact = new ForeachArtifact();
  artifact.setForeachWorkflowId("maestro-foreach-wf");
  artifact.setNextLoopIndex(12);
  artifact.setForeachOverview(new ForeachStepOverview());
  artifact.getForeachOverview().addOne(10, WorkflowInstance.Status.FAILED, null);
  artifact.getForeachOverview().refreshDetail();
  stepInstance.setArtifacts(Collections.singletonMap(Artifact.Type.FOREACH.key(), artifact));
  stepInstanceDao.insertOrUpsertStepInstance(stepInstance, true);

  RunResponse restartStepInfo = RunResponse.builder().instance(instance).stepId("job1").build();
  RunRequest runRequest =
      RunRequest.builder()
          .requester(user)
          .currentPolicy(RunPolicy.RESTART_FROM_SPECIFIC)
          .restartConfig(
              RestartConfig.builder()
                  .addRestartNode("maestro-foreach-wf", 10, "job2")
                  .addRestartNode("sample-dag-test-3", 1, "job1")
                  .build())
          .build();
  RunResponse response = actionDao.restartDirectly(restartStepInfo, runRequest, false);

  Assert.assertEquals("sample-dag-test-3", response.getWorkflowId());
  Assert.assertEquals(1, response.getWorkflowInstanceId());
  Assert.assertEquals(1, response.getWorkflowRunId());
  Assert.assertEquals("job1", response.getStepId());
  Assert.assertEquals(
      "User [tester] take action [RESTART] on the step", response.getTimelineEvent().getMessage());

  AssertHelper.assertThrows(
      "Cannot manually RESTART the step",
      MaestroResourceConflictException.class,
      "There is an ongoing action for this step [sample-dag-test-3][1][1][job1]",
      () -> actionDao.restartDirectly(restartStepInfo, runRequest, false));

  artifact.setNextLoopIndex(9);
  stepInstanceDao.insertOrUpsertStepInstance(stepInstance, true);
  AssertHelper.assertThrows(
      "Cannot manually RESTART the step",
      MaestroInvalidStatusException.class,
      "Cannot manually RESTART the step [sample-dag-test-3][1][1][job1] as its status [RUNNING] ",
      () -> actionDao.restartDirectly(restartStepInfo, runRequest, false));

  artifact.setNextLoopIndex(10);
  artifact.setPendingAction(ForeachAction.builder().build());
  stepInstanceDao.insertOrUpsertStepInstance(stepInstance, true);
  AssertHelper.assertThrows(
      "Cannot manually RESTART the step",
      MaestroResourceConflictException.class,
      "The foreach iteration [10] is not ready to be restarted for foreach step ",
      () -> actionDao.restartDirectly(restartStepInfo, runRequest, false));
}
@Override
public String getSQLListOfSchemas( DatabaseMeta databaseMeta ) {
  String databaseName = getDatabaseName();
  if ( databaseMeta != null ) {
    databaseName = databaseMeta.environmentSubstitute( databaseName );
  }
  return "SELECT SCHEMA_NAME AS \"name\" FROM " + databaseName + ".INFORMATION_SCHEMA.SCHEMATA";
}
@Test
public void testGetSQLListOfSchemasWithParameter() {
  SnowflakeHVDatabaseMeta snowflakeHVDatabaseMeta = spy( new SnowflakeHVDatabaseMeta() );
  String databaseName = UUID.randomUUID().toString();
  String databaseNameSubstitute = UUID.randomUUID().toString();
  doReturn( databaseName ).when( snowflakeHVDatabaseMeta ).getDatabaseName();
  DatabaseMeta databaseMeta = mock( DatabaseMeta.class );
  doReturn( databaseNameSubstitute ).when( databaseMeta ).environmentSubstitute( databaseName );

  String result = snowflakeHVDatabaseMeta.getSQLListOfSchemas( databaseMeta );

  String expected = "SELECT SCHEMA_NAME AS \"name\" FROM " + databaseNameSubstitute + ".INFORMATION_SCHEMA.SCHEMATA";
  assertEquals( expected, result );
}
public DefaultMQProducer getProducer() {
    return producer;
}
@Test
public void testTransactionListener() {
    assertThat(((TransactionMQProducer) rocketMQTemplate.getProducer()).getTransactionListener()).isNotNull();
}
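// Illustrative usage sketch (not from the source): how a TransactionListener typically ends up on
// the TransactionMQProducer that the test above asserts against. The group name and the trivial
// listener bodies are placeholders.
import org.apache.rocketmq.client.producer.LocalTransactionState;
import org.apache.rocketmq.client.producer.TransactionListener;
import org.apache.rocketmq.client.producer.TransactionMQProducer;
import org.apache.rocketmq.common.message.Message;
import org.apache.rocketmq.common.message.MessageExt;

public class TransactionListenerSketch {
    public static void main(String[] args) {
        TransactionMQProducer producer = new TransactionMQProducer("demo_group"); // placeholder group
        producer.setTransactionListener(new TransactionListener() {
            @Override
            public LocalTransactionState executeLocalTransaction(Message msg, Object arg) {
                return LocalTransactionState.COMMIT_MESSAGE; // placeholder local-transaction logic
            }

            @Override
            public LocalTransactionState checkLocalTransaction(MessageExt msg) {
                return LocalTransactionState.COMMIT_MESSAGE; // placeholder check-back logic
            }
        });
    }
}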